diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 07a8724b6f33d..96d8300a0faf3 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -1013,9 +1013,9 @@ let ManualCodegen = [{
 }] in {
   let HasFRMRoundModeOp = true in {
     // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
-    defm vfadd  : RVVFloatingBinBuiltinSetRoundingMode;
-    defm vfsub  : RVVFloatingBinBuiltinSetRoundingMode;
-    defm vfrsub : RVVFloatingBinVFBuiltinSetRoundingMode;
+    defm vfadd  : RVVFloatingBinBuiltinSetRoundingMode<true>;
+    defm vfsub  : RVVFloatingBinBuiltinSetRoundingMode<true>;
+    defm vfrsub : RVVFloatingBinVFBuiltinSetRoundingMode<true>;
     // 13.3. Vector Widening Floating-Point Add/Subtract Instructions
     // Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW
@@ -1023,14 +1023,14 @@ let ManualCodegen = [{
     defm vfwsub : RVVFloatingWidenOp0BinBuiltinSetRoundingMode;
     // 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
-    defm vfmul  : RVVFloatingBinBuiltinSetRoundingMode;
+    defm vfmul  : RVVFloatingBinBuiltinSetRoundingMode<true>;
     defm vfdiv  : RVVFloatingBinBuiltinSetRoundingMode;
     defm vfrdiv : RVVFloatingBinVFBuiltinSetRoundingMode;
   }
   // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
-  defm vfadd  : RVVFloatingBinBuiltinSet;
-  defm vfsub  : RVVFloatingBinBuiltinSet;
-  defm vfrsub : RVVFloatingBinVFBuiltinSet;
+  defm vfadd  : RVVFloatingBinBuiltinSet<true>;
+  defm vfsub  : RVVFloatingBinBuiltinSet<true>;
+  defm vfrsub : RVVFloatingBinVFBuiltinSet<true>;
   // 13.3. Vector Widening Floating-Point Add/Subtract Instructions
   // Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW
@@ -1038,7 +1038,7 @@ let ManualCodegen = [{
   defm vfwsub : RVVFloatingWidenOp0BinBuiltinSet;
   // 13.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
-  defm vfmul  : RVVFloatingBinBuiltinSet;
+  defm vfmul  : RVVFloatingBinBuiltinSet<true>;
   defm vfdiv  : RVVFloatingBinBuiltinSet;
   defm vfrdiv : RVVFloatingBinVFBuiltinSet;
 }
@@ -1065,6 +1065,10 @@ let ManualCodegen = [{
       defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "x",
                                            [["vv", "w", "wvvu"],
                                             ["vf", "w", "wveu"]]>;
+      let RequiredFeatures = ["zvfbfa"] in
+        defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "y",
+                                             [["vv", "vw", "wvvu"],
+                                              ["vf", "vw", "wveu"]]>;
     }
   }
   // 13.3. Vector Widening Floating-Point Add/Subtract Instructions
@@ -1081,6 +1085,10 @@ let ManualCodegen = [{
       defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "x",
                                            [["vv", "w", "wvv"],
                                             ["vf", "w", "wve"]]>;
+      let RequiredFeatures = ["zvfbfa"] in
+        defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "y",
+                                             [["vv", "vw", "wvv"],
+                                              ["vf", "vw", "wve"]]>;
     }
   }
 }
@@ -1170,6 +1178,8 @@ let ManualCodegen = [{
   defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "fd", [["v", "v", "vvu"]]>;
   let RequiredFeatures = ["zvfh"] in
     defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "x", [["v", "v", "vvu"]]>;
+  let RequiredFeatures = ["zvfbfa"] in
+    defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "y", [["v", "v", "vvu"]]>;
 }
 // 13.8. Vector Floating-Point Square-Root Instruction
 defm vfsqrt : RVVOutBuiltinSet<"vfsqrt", "fd", [["v", "v", "vv"]]>;
@@ -1180,21 +1190,26 @@ let ManualCodegen = [{
 defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "fd", [["v", "v", "vv"]]>;
 let RequiredFeatures = ["zvfh"] in
   defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "x", [["v", "v", "vv"]]>;
+let RequiredFeatures = ["zvfbfa"] in
+  defm vfrec7 : RVVOutBuiltinSet<"vfrec7", "y", [["v", "v", "vv"]]>;
 }
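For reference, builtin sets instantiated under HasFRMRoundModeOp additionally surface in C as _rm intrinsics that take an explicit frm argument; the i64 7 in the autogenerated CHECK lines further down is the dynamic-rounding encoding. A minimal usage sketch, assuming the bf16 names follow the existing zvfh _rm naming pattern:

#include <riscv_vector.h>

// Sketch only: __riscv_vfadd_vv_bf16m1_rm is inferred from the f16/f32
// naming convention, not quoted from this patch.
vbfloat16m1_t add_rne(vbfloat16m1_t a, vbfloat16m1_t b, size_t vl) {
  // Request static round-to-nearest-even instead of reading the frm CSR.
  return __riscv_vfadd_vv_bf16m1_rm(a, b, __RISCV_FRM_RNE, vl);
}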
 // 13.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
 defm vfrsqrt7 : RVVOutBuiltinSet<"vfrsqrt7", "fd", [["v", "v", "vv"]]>;
 let RequiredFeatures = ["zvfh"] in
   defm vfrsqrt7 : RVVOutBuiltinSet<"vfrsqrt7", "x", [["v", "v", "vv"]]>;
+let RequiredFeatures = ["zvfbfa"] in
+  defm vfrsqrt7 : RVVOutBuiltinSet<"vfrsqrt7", "y", [["v", "v", "vv"]]>;
+
 // 13.11. Vector Floating-Point MIN/MAX Instructions
-defm vfmin : RVVFloatingBinBuiltinSet;
-defm vfmax : RVVFloatingBinBuiltinSet;
+defm vfmin : RVVFloatingBinBuiltinSet<true>;
+defm vfmax : RVVFloatingBinBuiltinSet<true>;
 // 13.12. Vector Floating-Point Sign-Injection Instructions
-defm vfsgnj  : RVVFloatingBinBuiltinSet;
-defm vfsgnjn : RVVFloatingBinBuiltinSet;
-defm vfsgnjx : RVVFloatingBinBuiltinSet;
+defm vfsgnj  : RVVFloatingBinBuiltinSet<true>;
+defm vfsgnjn : RVVFloatingBinBuiltinSet<true>;
+defm vfsgnjx : RVVFloatingBinBuiltinSet<true>;
 }
 defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "fd">;
 let RequiredFeatures = ["zvfh"] in
@@ -1219,6 +1234,8 @@ let UnMaskedPolicyScheme = HasPassthruOperand in {
 defm vfclass : RVVOp0BuiltinSet<"vfclass", "fd", [["v", "Uv", "Uvv"]]>;
 let RequiredFeatures = ["zvfh"] in
   defm vfclass : RVVOp0BuiltinSet<"vfclass", "x", [["v", "Uv", "Uvv"]]>;
+let RequiredFeatures = ["zvfbfa"] in
+  defm vfclass : RVVOp0BuiltinSet<"vfclass", "y", [["v", "vUv", "Uvv"]]>;
 }
 // 13.15. Vector Floating-Point Merge Instruction
@@ -1239,6 +1256,9 @@ let HasMasked = false,
   let RequiredFeatures = ["zvfh"] in
     defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "x",
                                        [["vfm", "v", "vvem"]]>;
+  let RequiredFeatures = ["zvfbfa"] in
+    defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "y",
+                                       [["vfm", "v", "vvem"]]>;
 }
 // 13.16. Vector Floating-Point Move Instruction
@@ -1252,6 +1272,9 @@ let HasMasked = false,
   let RequiredFeatures = ["zvfh"] in
     defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "x",
                                    [["f", "v", "ve"]]>;
+  let RequiredFeatures = ["zvfbfa"] in
+    defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "y",
+                                   [["f", "v", "ve"]]>;
 }
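The vfmerge and vfmv_v_f additions above give bf16 the same scalar-broadcast and per-lane-select forms that f16/f32/f64 already have. A hedged sketch of the expected C surface (names inferred by analogy with the f16 intrinsics; they are not quoted from this diff):

#include <riscv_vector.h>

// Assumed names, mirroring __riscv_vfmv_v_f_f16m1 / __riscv_vfmerge_vfm_f16m1.
vbfloat16m1_t broadcast(__bf16 s, size_t vl) {
  return __riscv_vfmv_v_f_bf16m1(s, vl);          // splat s into every lane
}

vbfloat16m1_t select_scalar(vbfloat16m1_t v, __bf16 s, vbool16_t m, size_t vl) {
  return __riscv_vfmerge_vfm_bf16m1(v, s, m, vl); // lanes where m is set get s
}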
 // 13.17. Single-Width Floating-Point/Integer Type-Convert Instructions
@@ -1287,10 +1310,16 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
       defm : RVVConvBuiltinSet<"vfwcvt_f_x_v", "c", [["Fw", "Fwv"]]>;
     }
   }
+  let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfwcvt_f_bf16" in {
+    defm : RVVConvBuiltinSet<"vfwcvt_f_xu_v", "c", [["Yw", "YwUv"]]>;
+    defm : RVVConvBuiltinSet<"vfwcvt_f_x_v", "c", [["Yw", "Ywv"]]>;
+  }
   let OverloadedName = "vfwcvt_f" in {
     defm : RVVConvBuiltinSet<"vfwcvt_f_f_v", "f", [["w", "wv"]]>;
     let RequiredFeatures = ["zvfhmin"] in
       defm : RVVConvBuiltinSet<"vfwcvt_f_f_v", "x", [["w", "wv"]]>;
+    let RequiredFeatures = ["zvfbfa"] in
+      defm : RVVConvBuiltinSet<"vfwcvt_f_f_v", "y", [["vw", "wv"]]>;
   }
 }
@@ -1300,17 +1329,23 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
     defm : RVVConvBuiltinSet<"vfncvt_rtz_xu_f_w", "si", [["Uv", "UvFw"]]>;
     let RequiredFeatures = ["zvfh"] in
       defm : RVVConvBuiltinSet<"vfncvt_rtz_xu_f_w", "c", [["Uv", "UvFw"]]>;
+    let RequiredFeatures = ["zvfbfa"] in
+      defm : RVVConvBuiltinSet<"vfncvt_rtz_xu_f_w", "c", [["YwUv", "UvYw"]]>;
   }
   let OverloadedName = "vfncvt_rtz_x" in {
     defm : RVVConvBuiltinSet<"vfncvt_rtz_x_f_w", "si", [["Iv", "IvFw"]]>;
     let RequiredFeatures = ["zvfh"] in
       defm : RVVConvBuiltinSet<"vfncvt_rtz_x_f_w", "c", [["Iv", "IvFw"]]>;
+    let RequiredFeatures = ["zvfbfa"] in
+      defm : RVVConvBuiltinSet<"vfncvt_rtz_x_f_w", "c", [["YwIv", "IvYw"]]>;
   }
   let OverloadedName = "vfncvt_rod_f" in {
     defm : RVVConvBuiltinSet<"vfncvt_rod_f_f_w", "f", [["v", "vw"]]>;
     let RequiredFeatures = ["zvfh"] in
       defm : RVVConvBuiltinSet<"vfncvt_rod_f_f_w", "x", [["v", "vw"]]>;
   }
+  let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfncvt_rod_f_bf16" in
+    defm : RVVConvBuiltinSet<"vfncvt_rod_f_f_w", "y", [["v", "vw"]]>;
 }
 // Zvfbfmin - Vector convert BF16 to FP32
@@ -1363,11 +1398,15 @@ let ManualCodegen = [{
     defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "si", [["Iv", "IvFwu"]]>;
     let RequiredFeatures = ["zvfh"] in
       defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["Iv", "IvFwu"]]>;
+    let RequiredFeatures = ["zvfbfa"] in
+      defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["YwIv", "IvYwu"]]>;
   }
   let OverloadedName = "vfncvt_xu" in {
     defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "si", [["Uv", "UvFwu"]]>;
     let RequiredFeatures = ["zvfh"] in
       defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["Uv", "UvFwu"]]>;
+    let RequiredFeatures = ["zvfbfa"] in
+      defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["YwUv", "UvYwu"]]>;
   }
   let OverloadedName = "vfncvt_f" in {
     defm : RVVConvBuiltinSet<"vfncvt_f_x_w", "f", [["v", "vIwu"]]>;
@@ -1382,6 +1421,8 @@ let ManualCodegen = [{
     let RequiredFeatures = ["zvfhmin"] in
       defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "x", [["v", "vwu"]]>;
   }
+  let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfncvt_f_bf16" in
+    defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "y", [["v", "vwu"]]>;
 }
 // Zvfbfmin - Vector convert FP32 to BF16
@@ -1430,11 +1471,15 @@ let ManualCodegen = [{
     defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "si", [["Iv", "IvFw"]]>;
     let RequiredFeatures = ["zvfh"] in
       defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["Iv", "IvFw"]]>;
+    let RequiredFeatures = ["zvfbfa"] in
+      defm : RVVConvBuiltinSet<"vfncvt_x_f_w", "c", [["YwIv", "IvYw"]]>;
   }
   let OverloadedName = "vfncvt_xu" in {
     defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "si", [["Uv", "UvFw"]]>;
     let RequiredFeatures = ["zvfh"] in
       defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["Uv", "UvFw"]]>;
+    let RequiredFeatures = ["zvfbfa"] in
+      defm : RVVConvBuiltinSet<"vfncvt_xu_f_w", "c", [["YwUv", "UvYw"]]>;
   }
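A quick key to the prototype strings in these conversion hunks (descriptive only): each letter is one operand, the first being the return type; v is a vector of the base type, w a 2*SEW widened vector, e a scalar element, m a mask, u a trailing unsigned scalar used here for the frm (rounding-mode) argument, Uv/Iv unsigned/signed integer vectors, F a force-float prefix, and Y the force-bfloat prefix this patch introduces (see the 'Y' case added to RISCVVIntrinsicUtils.cpp below). So vfclass's [["v", "vUv", "Uvv"]] reads as "return an unsigned integer vector, take one bf16 vector", which is exactly what the vfclass.c test exercises:

#include <riscv_vector.h>

// Name confirmed by the autogenerated vfclass.c test in this patch.
vuint16m1_t classify(vbfloat16m1_t v, size_t vl) {
  // Each result lane holds the vfclass category bit mask (NaN, inf, zero, ...).
  return __riscv_vfclass_v_bf16m1_u16m1(v, vl);
}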
   let OverloadedName = "vfncvt_f" in {
     defm : RVVConvBuiltinSet<"vfncvt_f_x_w", "f", [["v", "vIw"]]>;
@@ -1449,6 +1494,8 @@ let ManualCodegen = [{
     let RequiredFeatures = ["zvfhmin"] in
       defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "x", [["v", "vw"]]>;
   }
+  let RequiredFeatures = ["zvfbfa"], OverloadedName = "vfncvt_f_bf16" in
+    defm : RVVConvBuiltinSet<"vfncvt_f_f_w", "y", [["v", "vw"]]>;
 }
 // Zvfbfmin - Vector convert FP32 to BF16
@@ -1578,6 +1625,9 @@ let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
   let RequiredFeatures = ["zvfh"] in
     defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "x",
                                    [["s", "ve", "ev"]]>;
+  let RequiredFeatures = ["zvfbfa"] in
+    defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "y",
+                                   [["s", "ve", "ev"]]>;
 }
 let OverloadedName = "vfmv_s",
     UnMaskedPolicyScheme = HasPassthruOperand,
@@ -1589,6 +1639,9 @@ let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
     defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "x",
                                    [["f", "v", "ve"],
                                     ["x", "Uv", "UvUe"]]>;
+  let RequiredFeatures = ["zvfbfa"] in
+    defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "y",
+                                   [["f", "v", "ve"]]>;
 }
 }
@@ -1601,11 +1654,11 @@ defm vslidedown : RVVSlideDownBuiltinSet;
 // 16.3.3. Vector Slide1up Instructions
 let UnMaskedPolicyScheme = HasPassthruOperand in {
 defm vslide1up : RVVSlideOneBuiltinSet;
-defm vfslide1up : RVVFloatingBinVFBuiltinSet;
+defm vfslide1up : RVVFloatingBinVFBuiltinSet<true>;
 // 16.3.4. Vector Slide1down Instruction
 defm vslide1down : RVVSlideOneBuiltinSet;
-defm vfslide1down : RVVFloatingBinVFBuiltinSet;
+defm vfslide1down : RVVFloatingBinVFBuiltinSet<true>;
 // 16.4. Vector Register Gather Instructions
 // signed and floating type
diff --git a/clang/include/clang/Basic/riscv_vector_common.td b/clang/include/clang/Basic/riscv_vector_common.td
index 767bcee7b1596..2a2a04998366a 100644
--- a/clang/include/clang/Basic/riscv_vector_common.td
+++ b/clang/include/clang/Basic/riscv_vector_common.td
@@ -470,6 +470,10 @@ let HasMaskedOffOperand = false in {
   defm "" : RVVOutOp1BuiltinSet;
+  let RequiredFeatures = ["zvfbfa"] in
+    defm "" : RVVOutOp1BuiltinSet;
 }
 multiclass RVVFloatingTerBuiltinSetRoundingMode {
   defm "" : RVVOutOp1BuiltinSet;
+  let RequiredFeatures = ["zvfbfa"] in
+    defm "" : RVVOutOp1BuiltinSet;
 }
 }
@@ -491,6 +499,10 @@ let HasMaskedOffOperand = false, Log2LMUL = [-2, -1, 0, 1, 2] in {
   defm "" : RVVOutOp1Op2BuiltinSet;
+  let RequiredFeatures = ["zvfbfa"] in
+    defm "" : RVVOutOp1Op2BuiltinSet;
 }
 multiclass RVVFloatingWidenTerBuiltinSetRoundingMode {
   defm "" : RVVOutOp1Op2BuiltinSet;
+  let RequiredFeatures = ["zvfbfa"] in
+    defm "" : RVVOutOp1Op2BuiltinSet;
 }
 }
-multiclass RVVFloatingBinBuiltinSet {
+multiclass RVVFloatingBinBuiltinSet<bit HasBF = false> {
   defm "" : RVVOutOp1BuiltinSet;
@@ -511,9 +527,15 @@ multiclass RVVFloatingBinBuiltinSet {
   defm "" : RVVOutOp1BuiltinSet;
+  if HasBF then {
+    let RequiredFeatures = ["zvfbfa"] in
+      defm "" : RVVOutOp1BuiltinSet;
+  }
 }
-multiclass RVVFloatingBinBuiltinSetRoundingMode {
+multiclass RVVFloatingBinBuiltinSetRoundingMode<bit HasBF = false> {
   defm "" : RVVOutOp1BuiltinSet;
@@ -521,22 +543,38 @@ multiclass RVVFloatingBinBuiltinSetRoundingMode {
   defm "" : RVVOutOp1BuiltinSet;
+  if HasBF then {
+    let RequiredFeatures = ["zvfbfa"] in
+      defm "" : RVVOutOp1BuiltinSet;
+  }
 }
-multiclass RVVFloatingBinVFBuiltinSet {
+multiclass RVVFloatingBinVFBuiltinSet<bit HasBF = false> {
   defm "" : RVVOutOp1BuiltinSet;
   let RequiredFeatures = ["zvfh"] in
     defm "" : RVVOutOp1BuiltinSet;
+  if HasBF then {
+    let RequiredFeatures = ["zvfbfa"] in
+      defm "" : RVVOutOp1BuiltinSet;
+  }
 }
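The HasBF parameter added to these multiclasses makes bfloat support opt-in per builtin set: only instantiations spelled with <true> (vfadd, vfsub, vfrsub, vfmul, vfmin, vfmax, the sign-injection ops, and the slide1 ops) gain a "y"-type variant, while vfdiv and vfrdiv stay FP-only in this patch. In C that yields, for example, the vector-scalar and masked vfadd forms:

#include <riscv_vector.h>

// Both names are confirmed by the autogenerated vfadd.c test below.
vbfloat16m1_t offset(vbfloat16m1_t x, __bf16 b, size_t vl) {
  return __riscv_vfadd_vf_bf16m1(x, b, vl);      // x[i] + b in bf16
}

vbfloat16m1_t offset_m(vbool16_t m, vbfloat16m1_t x, __bf16 b, size_t vl) {
  // Non-policy masked form: poison passthru, tail/mask agnostic (the i64 3
  // policy operand visible in the CHECK lines).
  return __riscv_vfadd_vf_bf16m1_m(m, x, b, vl);
}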
-multiclass RVVFloatingBinVFBuiltinSetRoundingMode {
+multiclass RVVFloatingBinVFBuiltinSetRoundingMode<bit HasBF = false> {
   defm "" : RVVOutOp1BuiltinSet;
   let RequiredFeatures = ["zvfh"] in
     defm "" : RVVOutOp1BuiltinSet;
+  if HasBF then {
+    let RequiredFeatures = ["zvfbfa"] in
+      defm "" : RVVOutOp1BuiltinSet;
+  }
 }
 multiclass RVVFloatingMaskOutBuiltinSet {
@@ -547,6 +585,10 @@ multiclass RVVFloatingMaskOutBuiltinSet {
   defm "" : RVVOp0Op1BuiltinSet;
+  let RequiredFeatures = ["zvfbfa"] in
+    defm "" : RVVOp0Op1BuiltinSet;
 }
 multiclass RVVFloatingMaskOutVFBuiltinSet
@@ -748,6 +790,10 @@ multiclass RVVFloatingWidenBinBuiltinSet {
   defm "" : RVVWidenBuiltinSet;
+  let RequiredFeatures = ["zvfbfa"] in
+    defm "" : RVVWidenBuiltinSet;
 }
 multiclass RVVFloatingWidenBinBuiltinSetRoundingMode {
@@ -758,6 +804,10 @@ multiclass RVVFloatingWidenBinBuiltinSetRoundingMode {
   defm "" : RVVWidenBuiltinSet;
+  let RequiredFeatures = ["zvfbfa"] in
+    defm "" : RVVWidenBuiltinSet;
 }
 multiclass RVVFloatingWidenOp0BinBuiltinSet {
@@ -768,6 +818,10 @@ multiclass RVVFloatingWidenOp0BinBuiltinSet {
   defm "" : RVVWidenWOp0BuiltinSet;
+  let RequiredFeatures = ["zvfbfa"] in
+    defm "" : RVVWidenWOp0BuiltinSet;
 }
 multiclass RVVFloatingWidenOp0BinBuiltinSetRoundingMode {
@@ -778,4 +832,8 @@ multiclass RVVFloatingWidenOp0BinBuiltinSetRoundingMode {
   defm "" : RVVWidenWOp0BuiltinSet;
+  let RequiredFeatures = ["zvfbfa"] in
+    defm "" : RVVWidenWOp0BuiltinSet;
 }
diff --git a/clang/lib/Sema/SemaRISCV.cpp b/clang/lib/Sema/SemaRISCV.cpp
index 3ba93ff98898b..c5ef0d535628d 100644
--- a/clang/lib/Sema/SemaRISCV.cpp
+++ b/clang/lib/Sema/SemaRISCV.cpp
@@ -1464,7 +1464,8 @@ void SemaRISCV::checkRVVTypeSupport(QualType Ty, SourceLocation Loc, Decl *D,
   } else if (Info.ElementType->isBFloat16Type() &&
              !FeatureMap.lookup("zvfbfmin") &&
-             !FeatureMap.lookup("xandesvbfhcvt"))
+             !FeatureMap.lookup("xandesvbfhcvt") &&
+             !FeatureMap.lookup("experimental-zvfbfa"))
     if (DeclareAndesVectorBuiltins) {
       Diag(Loc, diag::err_riscv_type_requires_extension, D)
           << Ty << "zvfbfmin or xandesvbfhcvt";
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index 5a4e805d4a9d1..dad3d0dae423a 100644
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -654,6 +654,9 @@ PrototypeDescriptor::parsePrototypeDescriptor(
   case 'F':
     TM |= TypeModifier::Float;
     break;
+  case 'Y':
+    TM |= TypeModifier::BFloat;
+    break;
   case 'S':
     TM |= TypeModifier::LMUL1;
     break;
@@ -704,6 +707,8 @@ void RVVType::applyModifier(const PrototypeDescriptor &Transformer) {
     ElementBitwidth *= 2;
     LMUL.MulLog2LMUL(1);
     Scale = LMUL.getScale(ElementBitwidth);
+    if (ScalarType == ScalarTypeKind::BFloat)
+      ScalarType = ScalarTypeKind::Float;
     break;
   case VectorTypeModifier::Widening4XVector:
     ElementBitwidth *= 4;
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfadd.c
new file mode 100644
index 0000000000000..d7734e05a8aaa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfadd.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL:
define dso_local @test_vfadd_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2_m( +// 
CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m8_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfclass.c new file mode 100644 index 0000000000000..68814f4672d05 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfclass.c @@ -0,0 +1,134 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf4_u16mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16mf4_u16mf4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf2_u16mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16mf2_u16mf2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m1_u16m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfclass_v_bf16m1_u16m1(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m1_u16m1(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m2_u16m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfclass_v_bf16m2_u16m2(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m2_u16m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m4_u16m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfclass_v_bf16m4_u16m4(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m4_u16m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m8_u16m8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv32bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfclass_v_bf16m8_u16m8(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m8_u16m8(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf4_u16mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfclass_v_bf16mf4_u16mf4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf2_u16mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfclass_v_bf16mf2_u16mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m1_u16m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfclass_v_bf16m1_u16m1_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfclass_v_bf16m1_u16m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m2_u16m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfclass_v_bf16m2_u16m2_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfclass_v_bf16m2_u16m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m4_u16m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfclass_v_bf16m4_u16m4_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfclass_v_bf16m4_u16m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m8_u16m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfclass_v_bf16m8_u16m8_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfclass_v_bf16m8_u16m8_m(vm, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmacc.c new file mode 100644 index 0000000000000..616455d5f3f9e --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmacc.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64( 
[[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m1(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4( +// 
CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m8(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m1_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m1_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m8_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m8_m(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmadd.c new file mode 100644 index 0000000000000..eec662a3671c8 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmadd.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16mf4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16mf4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2( +// 
CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmadd_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmadd_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmadd_vv_bf16m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmadd_vf_bf16m1(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmadd_vv_bf16m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmadd_vf_bf16m2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmadd_vv_bf16m4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmadd_vf_bf16m4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmadd_vv_bf16m8(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmadd_vf_bf16m8(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmadd_vv_bf16mf4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmadd_vf_bf16mf4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmadd_vv_bf16mf2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmadd_vf_bf16mf2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmadd_vv_bf16m1_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmadd_vf_bf16m1_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vv_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmadd_vv_bf16m2_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmadd_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfmadd_vf_bf16m2_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmadd_vv_bf16m4_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmadd_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmadd_vf_bf16m4_m(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmadd_vv_bf16m8_m(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmadd_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmadd_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmax.c
new file mode 100644
index 0000000000000..dfdeb4e967a46
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmax.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfmax_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmax_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmax.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmax_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+  return
__riscv_vfmax_vf_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf2_m(mask, op1, 
op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
<vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+  return __riscv_vfmax_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmax_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfmax_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmerge.c
new file mode 100644
index 0000000000000..96221c5385dd9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmerge.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmerge_vfm_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmerge.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 1 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmerge_vfm_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) {
+  return __riscv_vfmerge_vfm_bf16mf4(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmerge_vfm_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmerge.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 2 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmerge_vfm_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) {
+  return __riscv_vfmerge_vfm_bf16mf2(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmerge_vfm_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmerge.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 4 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmerge_vfm_bf16m1(vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) {
+  return __riscv_vfmerge_vfm_bf16m1(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmerge_vfm_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmerge.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmerge_vfm_bf16m2(vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) {
+  return __riscv_vfmerge_vfm_bf16m2(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmerge_vfm_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmerge.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmerge_vfm_bf16m4(vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) {
+  return __riscv_vfmerge_vfm_bf16m4(op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmerge_vfm_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmerge.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmerge_vfm_bf16m8(vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) {
+  return __riscv_vfmerge_vfm_bf16m8(op1, op2, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmin.c
new file mode 100644
index 0000000000000..8f8d82ba21bc5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmin.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfmin_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmin_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmin.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfmin_vf_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmin_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmin_vv_bf16mf2(vbfloat16mf2_t op1,
vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8( 
+// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1_m(vbool16_t 
mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmin_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfmin_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsac.c
new file mode 100644
index 0000000000000..f4644dfb8d7e7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsac.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsac_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsac_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsac_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsac_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsac_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsac.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsac_vf_bf16mf2(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmsac_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m1(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m8(vd, 
vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m8(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16mf4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16mf4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16mf2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16mf2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m1_m(mask, vd, vs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m1_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m8_m(mask, vd, vs1, vs2, vl); +} 
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmsac_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmsac_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsub.c
new file mode 100644
index 0000000000000..07053afa3355c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_bf16mf4(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_bf16mf4(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_bf16mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_bf16mf2(vd, rs1, vs2, vl);
+} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m1(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 
3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m8(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16mf4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16mf4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16mf2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16mf2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16m1_t test_vfmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m1_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m1_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m8_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m8_m(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmul.c new file mode 100644 index 0000000000000..88fb329934365 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmul.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return 
__riscv_vfmul_vf_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8( +// 
CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], 
[[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m8_m(mask, op1, 
op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmv.c new file mode 100644 index 0000000000000..d80ec3df1bbaa --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfmv.c @@ -0,0 +1,189 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16mf4( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv1bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmv_v_f_bf16mf4(__bf16 src, size_t vl) { + return __riscv_vfmv_v_f_bf16mf4(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16mf2( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv2bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmv_v_f_bf16mf2(__bf16 src, size_t vl) { + return __riscv_vfmv_v_f_bf16mf2(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16m1( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv4bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmv_v_f_bf16m1(__bf16 src, size_t vl) { + return __riscv_vfmv_v_f_bf16m1(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16m2( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv8bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmv_v_f_bf16m2(__bf16 src, size_t vl) { + return __riscv_vfmv_v_f_bf16m2(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16m4( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv16bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmv_v_f_bf16m4(__bf16 src, size_t vl) { + return __riscv_vfmv_v_f_bf16m4(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16m8( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv32bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmv_v_f_bf16m8(__bf16 src, size_t vl) { + return __riscv_vfmv_v_f_bf16m8(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf4_bf16( +// CHECK-RV64-SAME: 
[[SRC:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv1bf16( [[SRC]]) +// CHECK-RV64-NEXT: ret bfloat [[TMP0]] +// +__bf16 test_vfmv_f_s_bf16mf4_bf16(vbfloat16mf4_t src) { + return __riscv_vfmv_f_s_bf16mf4_bf16(src); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16mf4( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmv_s_f_bf16mf4(__bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16mf4(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf2_bf16( +// CHECK-RV64-SAME: [[SRC:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv2bf16( [[SRC]]) +// CHECK-RV64-NEXT: ret bfloat [[TMP0]] +// +__bf16 test_vfmv_f_s_bf16mf2_bf16(vbfloat16mf2_t src) { + return __riscv_vfmv_f_s_bf16mf2_bf16(src); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16mf2( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmv_s_f_bf16mf2(__bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16mf2(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m1_bf16( +// CHECK-RV64-SAME: [[SRC:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv4bf16( [[SRC]]) +// CHECK-RV64-NEXT: ret bfloat [[TMP0]] +// +__bf16 test_vfmv_f_s_bf16m1_bf16(vbfloat16m1_t src) { + return __riscv_vfmv_f_s_bf16m1_bf16(src); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m1( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmv_s_f_bf16m1(__bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16m1(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m2_bf16( +// CHECK-RV64-SAME: [[SRC:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv8bf16( [[SRC]]) +// CHECK-RV64-NEXT: ret bfloat [[TMP0]] +// +__bf16 test_vfmv_f_s_bf16m2_bf16(vbfloat16m2_t src) { + return __riscv_vfmv_f_s_bf16m2_bf16(src); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m2( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmv_s_f_bf16m2(__bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16m2(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m4_bf16( +// CHECK-RV64-SAME: [[SRC:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv16bf16( [[SRC]]) +// CHECK-RV64-NEXT: ret bfloat [[TMP0]] +// 
+__bf16 test_vfmv_f_s_bf16m4_bf16(vbfloat16m4_t src) { + return __riscv_vfmv_f_s_bf16m4_bf16(src); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m4( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv16bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmv_s_f_bf16m4(__bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16m4(src, vl); +} + +// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m8_bf16( +// CHECK-RV64-SAME: [[SRC:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv32bf16( [[SRC]]) +// CHECK-RV64-NEXT: ret bfloat [[TMP0]] +// +__bf16 test_vfmv_f_s_bf16m8_bf16(vbfloat16m8_t src) { + return __riscv_vfmv_f_s_bf16m8_bf16(src); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m8( +// CHECK-RV64-SAME: bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv32bf16.i64( poison, bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmv_s_f_bf16m8(__bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16m8(src, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt.c new file mode 100644 index 0000000000000..a5afab9bec1ec --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt.c @@ -0,0 +1,724 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) { + return 
__riscv_vfncvt_x_f_w_bf16m1_i8mf2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m2_i8m1(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2, + size_t vl) { + return 
__riscv_vfncvt_x_f_w_bf16mf4_i8mf8_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m2_i8m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 
7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_rm(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(vbool32_t vm, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(vbool32_t vm, + 
vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(vbool16_t vm, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_m(vbool32_t vm, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_rm_m(vm, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rod.c new file mode 100644 index 0000000000000..70c377bba1dfb --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rod.c @@ -0,0 +1,113 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64( poison, [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m1(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m4_m(vm, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c new file mode 100644 index 0000000000000..854e9868109e4 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfncvt_rtz.c @@ -0,0 +1,267 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +//
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64( poison, 
[[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(vm, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmacc.c new file mode 100644 index 0000000000000..18484883a14f4 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmacc.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4( +// CHECK-RV64-SAME:
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16mf4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16mf4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16mf2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m1(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vbfloat16m2_t test_vfnmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m8(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16mf4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4_m( +// 
CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16mf4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16mf2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16mf2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m1_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m1_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfnmacc_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m8_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m8_m(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmadd.c new file mode 100644 index 0000000000000..e519e5acb4575 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmadd.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 
-target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16mf4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m1(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2( +//
CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m8(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16mf4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16mf2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m1_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m1_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], 
[[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m8_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m8_m(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsac.c new file mode 100644 index 0000000000000..47e1f44f5a45f --- 
/dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsac.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64( [[VD]], bfloat
[[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m1(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m8(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfnmsac_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m1_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m1_m(mask, vd, 
rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m8_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return 
__riscv_vfnmsac_vf_bf16m8_m(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsub.c new file mode 100644 index 0000000000000..4b55b64542c61 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfnmsub.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m1(vd, 
vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m1(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m2(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m4(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsub.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m8(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m1_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m1_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m2_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m2_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m4_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m4_m(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m8_m(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub_vf_bf16m8_m(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrec7.c
new file mode 100644
index 0000000000000..1ffee73e91d04
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrec7.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) {
+  return __riscv_vfrec7_v_bf16mf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) {
+  return __riscv_vfrec7_v_bf16mf2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1(vbfloat16m1_t op1, size_t vl) {
+  return __riscv_vfrec7_v_bf16m1(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2(vbfloat16m2_t op1, size_t vl) {
+  return __riscv_vfrec7_v_bf16m2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4(vbfloat16m4_t op1, size_t vl) {
+  return __riscv_vfrec7_v_bf16m4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8(vbfloat16m8_t op1, size_t vl) {
+  return __riscv_vfrec7_v_bf16m8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) {
+  return __riscv_vfrec7_v_bf16mf4_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) {
+  return __riscv_vfrec7_v_bf16mf2_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) {
+  return __riscv_vfrec7_v_bf16m1_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) {
+  return __riscv_vfrec7_v_bf16m2_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) {
+  return __riscv_vfrec7_v_bf16m4_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) {
+  return __riscv_vfrec7_v_bf16m8_m(mask, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsqrt7.c
new file mode 100644
index 0000000000000..964c4869622aa
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsqrt7.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) {
+  return __riscv_vfrsqrt7_v_bf16mf4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) {
+  return __riscv_vfrsqrt7_v_bf16mf2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1(vbfloat16m1_t op1, size_t vl) {
+  return __riscv_vfrsqrt7_v_bf16m1(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2(vbfloat16m2_t op1, size_t vl) {
+  return __riscv_vfrsqrt7_v_bf16m2(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4(vbfloat16m4_t op1, size_t vl) {
+  return __riscv_vfrsqrt7_v_bf16m4(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8(vbfloat16m8_t op1, size_t vl) {
+  return __riscv_vfrsqrt7_v_bf16m8(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) {
+  return __riscv_vfrsqrt7_v_bf16mf4_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) {
+  return __riscv_vfrsqrt7_v_bf16mf2_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) {
+  return __riscv_vfrsqrt7_v_bf16m1_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) {
+  return __riscv_vfrsqrt7_v_bf16m2_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) {
+  return __riscv_vfrsqrt7_v_bf16m4_m(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) {
+  return __riscv_vfrsqrt7_v_bf16m8_m(mask, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsub.c
new file mode 100644
index 0000000000000..c7c3869e7b77c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfrsub.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m8_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnj.c new file mode 100644 index 0000000000000..778b8b83e9841 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnj.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4_m(vbool4_t 
mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m8_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjn.c new file mode 100644 index 0000000000000..7de308978e1d3 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjn.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf4(op1, 
op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m1(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4( +// CHECK-RV64-SAME: 
[[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_m(vbool32_t mask, 
vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], 
<vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+  return __riscv_vfsgnjn_vv_bf16m8_m(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsgnjn_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjx.c
new file mode 100644
index 0000000000000..5fa285cc78b63
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsgnjx.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfsgnjx_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsgnjx_vf_bf16mf4(op1, op2, vl);
+}
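+
+// A usage sketch, illustrative only (not part of the autogenerated
+// checks): strip-mined sign-injection over bf16 buffers. It assumes the
+// zvfbfmin load/store intrinsics (__riscv_vle16_v_bf16m1 and
+// __riscv_vse16_v_bf16m1) are available alongside zvfbfa; the function
+// and pointer names are made up.
+//
+//   void copysign_xor_bf16(__bf16 *dst, const __bf16 *a, const __bf16 *b,
+//                          size_t n) {
+//     for (size_t vl; n > 0; n -= vl, a += vl, b += vl, dst += vl) {
+//       vl = __riscv_vsetvl_e16m1(n);
+//       vbfloat16m1_t va = __riscv_vle16_v_bf16m1(a, vl);
+//       vbfloat16m1_t vb = __riscv_vle16_v_bf16m1(b, vl);
+//       // vfsgnjx keeps the magnitude of va and XORs the two sign bits.
+//       vbfloat16m1_t vr = __riscv_vfsgnjx_vv_bf16m1(va, vb, vl);
+//       __riscv_vse16_v_bf16m1(dst, vr, vl);
+//     }
+//   }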
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfsgnjx_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsgnjx_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+  return __riscv_vfsgnjx_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsgnjx_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsgnjx_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsgnjx_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+  return __riscv_vfsgnjx_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsgnjx_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+  return __riscv_vfsgnjx_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsgnjx_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> 
[[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1_m( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64( 
<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsgnjx_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1down.c
new file mode 100644
index 0000000000000..b94d26b4ddf40
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1down.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1down_vf_bf16mf4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1down_vf_bf16mf2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1down_vf_bf16m1(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1down_vf_bf16m2(src, value, vl);
+}
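+
+// Illustrative note (not an autogenerated check): vfslide1down shifts
+// every element of the source vector down by one position and writes the
+// scalar operand into the top element (index vl-1), so calling it once
+// per sample streams scalars into a sliding window. A hypothetical
+// helper, assuming a zvfbfa-enabled toolchain:
+//
+//   vbfloat16m1_t shift_in_bf16(vbfloat16m1_t window, __bf16 sample,
+//                               size_t vl) {
+//     return __riscv_vfslide1down_vf_bf16m1(window, sample, vl);
+//   }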
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1down_vf_bf16m4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1down_vf_bf16m8(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1down_vf_bf16mf4_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1down_vf_bf16mf2_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1down_vf_bf16m1_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1down_vf_bf16m2_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1down_vf_bf16m4_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1down_vf_bf16m8_m(mask, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1up.c
new file mode 100644
index 0000000000000..06e8b49af19d0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfslide1up.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1up_vf_bf16mf4(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1up_vf_bf16mf2(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1up_vf_bf16m1(src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1up_vf_bf16m2(src, value, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m4( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1up_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m4(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m8( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1up_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m8(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16mf4_m(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16mf2_m(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1up_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m1_m(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1up_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_vf_bf16m2_m(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1up_vf_bf16m4_m(mask, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1up_vf_bf16m8_m(mask, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsub.c
new file mode 100644
index 0000000000000..2423b0bbdbb80
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfsub.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfsub_vv_bf16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsub_vf_bf16mf4(op1, op2, vl);
+}
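+
+// Note on the checks below, illustrative only: unlike the sign-injection
+// and slide intrinsics, vfsub lowers with an extra rounding-mode operand
+// ahead of the vl (the `i64 7` selects the dynamic frm). A strip-mined
+// elementwise subtraction sketch, assuming the zvfbfmin load/store
+// intrinsics are available; all names below are made up:
+//
+//   void sub_bf16(__bf16 *dst, const __bf16 *a, const __bf16 *b, size_t n) {
+//     for (size_t vl; n > 0; n -= vl, a += vl, b += vl, dst += vl) {
+//       vl = __riscv_vsetvl_e16m1(n);
+//       // Subtract under the current dynamic rounding mode (frm).
+//       vbfloat16m1_t d = __riscv_vfsub_vv_bf16m1(
+//           __riscv_vle16_v_bf16m1(a, vl), __riscv_vle16_v_bf16m1(b, vl), vl);
+//       __riscv_vse16_v_bf16m1(dst, d, vl);
+//     }
+//   }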
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfsub_vv_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsub_vf_bf16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+  return __riscv_vfsub_vv_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsub_vf_bf16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vv_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], <vscale x 8 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+  return __riscv_vfsub_vv_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsub_vf_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsub.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsub_vf_bf16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vv_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+  return __riscv_vfsub_vv_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsub_vf_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsub.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsub_vf_bf16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t 
test_vfsub_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16mf4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16mf2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m1_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, 
<vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsub_vf_bf16m8_m(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwadd.c
new file mode 100644
index 0000000000000..24d34f46f4203
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwadd.c
@@ -0,0 +1,899 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+                                            vbfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+                                            size_t vl) {
+  return __riscv_vfwadd_vf_bf16mf4_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2,
+                                            vbfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwadd_wv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wf_bf16_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1,
+                                         size_t vl) {
+  return __riscv_vfwadd_wf_bf16_f32mf2(vs2, rs1, vl);
+}
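+
+// Illustrative note (not an autogenerated check): the four vfwadd forms
+// above differ only in operand kinds. The .vv and .vf forms widen two
+// bf16 inputs (vector+vector, vector+scalar) into an f32 result, while
+// .wv and .wf add a bf16 input into an already-widened f32 vector, which
+// is the natural shape for accumulation. A hypothetical helper:
+//
+//   vfloat32m1_t acc_bf16(vfloat32m1_t acc, vbfloat16mf2_t x, size_t vl) {
+//     return __riscv_vfwadd_wv_bf16mf2_f32m1(acc, x, vl);
+//   }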
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+                                          vbfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwadd_vv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+                                          size_t vl) {
+  return __riscv_vfwadd_vf_bf16mf2_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1,
+                                          size_t vl) {
+  return __riscv_vfwadd_wv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwadd_wf_bf16_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwadd_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1,
+                                       size_t vl) {
+  return __riscv_vfwadd_wf_bf16_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+                                         size_t vl) {
+  return __riscv_vfwadd_vv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1,
+                                         size_t vl) {
+  return __riscv_vfwadd_vf_bf16m1_f32m2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1,
+                                         size_t vl) {
+  return __riscv_vfwadd_wv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwadd_wf_bf16_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64( poison, 
[[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + 
vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16m1_f32m2_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2_m(vm, vs2, vs1, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat 
noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64( poison, 
[[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m1_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], 
i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwadd_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, 
i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16m1_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( 
poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwcvt.c new file mode 100644 index 0000000000000..fb3e0031af98c 
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwcvt.c
@@ -0,0 +1,366 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_x_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4(vint8mf8_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_x_v_bf16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_x_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2(vint8mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_x_v_bf16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_x_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1(vint8mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_x_v_bf16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_x_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2(vint8m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_x_v_bf16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_x_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4(vint8m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_x_v_bf16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_x_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8(vint8m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_x_v_bf16m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfwcvt_f_xu_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4(vuint8mf8_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_xu_v_bf16mf4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfwcvt_f_xu_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2(vuint8mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_xu_v_bf16mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfwcvt_f_xu_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1(vuint8mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_xu_v_bf16m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfwcvt_f_xu_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2(vuint8m1_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_xu_v_bf16m2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfwcvt_f_xu_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4(vuint8m2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_xu_v_bf16m4(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfwcvt_f_xu_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x i8> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8(vuint8m4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_xu_v_bf16m8(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_f_v_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2(vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_f_v_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1(vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_f_v_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = 
call @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m1_f32m2(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m2_f32m4(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m4_f32m8(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_m(vbool64_t vm, vint8mf8_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_m(vbool32_t vm, vint8mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_m(vbool16_t vm, vint8mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_m(vbool8_t vm, vint8m1_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_m(vbool4_t vm, vint8m2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_m(vbool2_t vm, vint8m4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m8_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_m(vbool64_t vm, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_m(vbool32_t vm, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_m(vbool16_t vm, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_m(vbool8_t vm, vuint8m1_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_m(vbool4_t vm, vuint8m2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64( poison, [[VS2]], [[VM]], i64 
[[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_m(vbool2_t vm, vuint8m4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m8_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_m(vbool64_t vm, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_m(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_m(vm, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmacc.c new file mode 100644 index 0000000000000..be09003320386 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmacc.c @@ -0,0 +1,486 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S 
-passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+                                             vbfloat16mf4_t vs1,
+                                             vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmacc_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+                                             vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+                                           vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmacc_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+                                           vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+                                          vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vv_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmacc_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+                                          vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmacc_vf_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define 
dso_local @test_vfwmacc_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmacc_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, 
+ __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmacc_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmsac.c new file mode 100644 index 0000000000000..749081333c2b3 --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmsac.c
@@ -0,0 +1,486 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+                                             vbfloat16mf4_t vs1,
+                                             vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmsac_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+                                             vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+                                           vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmsac_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+                                           vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vf_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+                                          vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwmsac_vv_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmsac_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+//
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], 
[[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); 
+} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8_rm(vd, vs1, vs2, 
__RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return 
__riscv_vfwmsac_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2, __RISCV_FRM_RNE,
+                                              vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmul.c
new file mode 100644
index 0000000000000..6783ba43b0570
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwmul.c
@@ -0,0 +1,455 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+                                            vbfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwmul_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+                                            size_t vl) {
+  return __riscv_vfwmul_vf_bf16mf4_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+                                          vbfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vfwmul_vv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwmul_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1,
+                                          size_t vl) {
+  return __riscv_vfwmul_vf_bf16mf2_f32m1(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwmul_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VS2:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> poison, <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1,
+                                         size_t vl) {
+  return __riscv_vfwmul_vv_bf16m1_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float>
@test_vfwmul_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t 
test_vfwmul_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ vbfloat16m2_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> poison, <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m2_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ vbfloat16m4_t vs1, size_t vl) {
+ return __riscv_vfwmul_vv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> poison, <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2,
+ __bf16 rs1, size_t vl) {
+ return __riscv_vfwmul_vf_bf16m4_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
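For readers skimming the generated checks, a minimal usage sketch of the bf16 vfwmul builtins the file above covers. It is illustrative only: the helper name mul_widen_bf16 is ours, and it assumes a toolchain where the experimental zvfbfa intrinsics are enabled and bf16 unit-stride loads/stores (from zvfbfmin) are available.

#include <riscv_vector.h>
#include <stddef.h>

// dst[i] = (float)a[i] * (float)b[i], strip-mined so it works for any
// runtime VLEN. bf16 inputs at SEW=16/LMUL=1 widen to f32 at SEW=32/LMUL=2.
static void mul_widen_bf16(const __bf16 *a, const __bf16 *b, float *dst,
                           size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e16m1(n - i); // elements handled this strip
    vbfloat16m1_t va = __riscv_vle16_v_bf16m1(a + i, vl);
    vbfloat16m1_t vb = __riscv_vle16_v_bf16m1(b + i, vl);
    vfloat32m2_t prod = __riscv_vfwmul_vv_bf16m1_f32m2(va, vb, vl);
    __riscv_vse32_v_f32m2(dst + i, prod, vl);
    i += vl;
  }
}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmacc.c
new file mode 100644
index 0000000000000..6127a94c919d9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmacc.c
@@ -0,0 +1,494 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+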
vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m2_f32m4(vd, vs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return 
__riscv_vfwnmacc_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 
3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t 
test_vfwnmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmacc_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
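Semantically, each vfwnmacc test above computes vd = -(vs1 * vs2) - vd with bf16 multiplicands and an f32 accumulator. A hedged sketch of a direct call follows; the helper name fused_step is ours. The _rm form passes the rounding mode explicitly, while the plain form defers to the dynamic frm CSR, which is what the i64 7 operand in the generated checks encodes.

#include <riscv_vector.h>
#include <stddef.h>

// One strip of acc = -(vs1 * vs2) - acc, forced to round-to-nearest-even
// instead of whatever the current frm CSR holds.
static vfloat32m1_t fused_step(vfloat32m1_t acc, vbfloat16mf2_t vs1,
                               vbfloat16mf2_t vs2, size_t vl) {
  return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm(acc, vs1, vs2,
                                              __RISCV_FRM_RNE, vl);
}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmsac.c
new file mode 100644
index 0000000000000..f37dd310d944d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwnmsac.c
@@ -0,0 +1,494 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s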
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmsac_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1,
+ vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf4_f32mf2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmsac_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1,
+ vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16mf2_f32m1(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vv_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1,
+ vbfloat16m1_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> 
@test_vfwnmsac_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m2_f32m4(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwnmsac_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t 
test_vfwnmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m2_f32m4_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8_m(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_m(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, 
size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmsac_vf_bf16m1_f32m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd,
+ __bf16 vs1, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vv_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ vbfloat16m2_t vs1,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmsac_vf_bf16m2_f32m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], bfloat [[VS1]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd,
+ __bf16 vs1, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vv_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ vbfloat16m4_t vs1,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwnmsac_vf_bf16m4_f32m8_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], bfloat [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd,
+ __bf16 vs1, vbfloat16m4_t vs2,
+ size_t vl) {
+ return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_m(vm, vd, vs1, vs2,
+ __RISCV_FRM_RNE, vl);
+}
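The vfwsub file that follows also covers the .wv/.wf forms, which keep the already-widened f32 operand as vs2 rather than widening both inputs. A hedged sketch of the difference, with helper names of our choosing:

#include <riscv_vector.h>
#include <stddef.h>

// vv form: both operands are bf16 and are widened, d = (float)a - (float)b.
static vfloat32m1_t sub_vv(vbfloat16mf2_t a, vbfloat16mf2_t b, size_t vl) {
  return __riscv_vfwsub_vv_bf16mf2_f32m1(a, b, vl);
}

// wv form: acc is already f32; only x is widened, d = acc - (float)x.
static vfloat32m1_t sub_wv(vfloat32m1_t acc, vbfloat16mf2_t x, size_t vl) {
  return __riscv_vfwsub_wv_bf16mf2_f32m1(acc, x, vl);
}

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwsub.c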
new file mode 100644
index 0000000000000..510ff9193389e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vfwsub.c
@@ -0,0 +1,899 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_vf_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_vf_bf16mf4_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wv_bf16mf4_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwsub_wv_bf16mf4_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwsub_wf_bf16_f32mf2(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwsub_wf_bf16_f32mf2(vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vv_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> poison, <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2,
+ vbfloat16mf2_t vs1, size_t vl) {
+ return __riscv_vfwsub_vv_bf16mf2_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwsub_vf_bf16mf2_f32m1(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> 
@llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8( +// 
CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( poison, 
[[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t 
test_vfwsub_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8_m(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8_m(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwsub_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 
[[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm( 
+// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8_rm(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8_rm(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm_m( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwsub_wf_bf16_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8_rm_m(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8_rm_m(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfeq.c new file mode 100644 index 0000000000000..669d0427b569a --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfeq.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: 
-target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return 
__riscv_vmfeq_vv_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16mf4_b64_m(mask, op1, 
op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16m8_b2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m8_b2_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfge.c new file mode 100644 index 0000000000000..b169efd51a0b4 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfge.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vbool64_t test_vmfge_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m4_b4(op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_bf16mf2_b32_m(vbool32_t mask, 
vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m8_b2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m8_b2_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfgt.c new file mode 100644 index 0000000000000..9aea7d24b0edc --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfgt.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t 
test_vmfgt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vmfgt_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 
[[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m8_b2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m8_b2_m(mask, op1, op2, vl); +} + diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfle.c new file mode 100644 index 0000000000000..40f0c27f5b37a --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfle.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vbool16_t test_vmfle_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return 
__riscv_vmfle_vv_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m8_b2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m8_b2_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmflt.c new file mode 100644 index 0000000000000..f64eee3effdaf --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmflt.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vmflt.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t 
test_vmflt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t 
test_vmflt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m8_b2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m8_b2_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfne.c new file mode 100644 index 0000000000000..809ea5628e394 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/non-overloaded/vmfne.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16mf4_b64(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfne.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16mf2_b32(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m1_b16(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m2_b8(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t 
test_vmfne_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m4_b4(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m8_b2(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16mf4_b64_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16mf2_b32_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m1_b16_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m2_b8_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m4_b4_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return 
__riscv_vmfne_vv_bf16m8_b2_m(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m8_b2_m(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfadd.c new file mode 100644 index 0000000000000..9d6b071c2768c --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfadd.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t 
test_vfadd_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfclass.c new file mode 100644 index 0000000000000..2760f85a45d3c --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfclass.c @@ -0,0 +1,134 @@ +// NOTE: Assertions have been autogenerated by 
utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf4_u16mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf2_u16mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m1_u16m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfclass_v_bf16m1_u16m1(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m2_u16m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfclass_v_bf16m2_u16m2(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m4_u16m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfclass_v_bf16m4_u16m4(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m8_u16m8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv32bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfclass_v_bf16m8_u16m8(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfclass(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf4_u16mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_m(vbool64_t vm, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfclass(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf2_u16mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_m(vbool32_t vm, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfclass(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m1_u16m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfclass_v_bf16m1_u16m1_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfclass(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m2_u16m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfclass_v_bf16m2_u16m2_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfclass(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m4_u16m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfclass_v_bf16m4_u16m4_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfclass(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m8_u16m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfclass_v_bf16m8_u16m8_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfclass(vm, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmacc.c new file mode 100644 index 0000000000000..ae3f1f24eb762 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmacc.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, 
size_t vl) { + return __riscv_vfmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], 
[[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfmacc_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4_m( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmadd.c new file mode 100644 index 0000000000000..db2184c6e1dba --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmadd.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd(vd, vs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] 
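+// Reading these vfmadd checks: vd is both multiplicand and destination
+// (vd = vd * vs1 + vs2, or vd * rs1 + vs2 for the .vf forms), `i64 7`
+// selects dynamic rounding from the frm CSR, and the trailing `i64 3` is
+// the tail/mask-agnostic policy operand. A minimal sketch of the overloaded
+// bf16 call these tests exercise, reusing the test names and the zvfbfa RUN
+// configuration above:
+//   vbfloat16m2_t r = __riscv_vfmadd(vd, rs1, vs2, vl);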
+// +vbfloat16m2_t test_vfmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmax.c new file mode 100644 index 0000000000000..66497bfb15934 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmax.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
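+// Note: unlike vfadd and vfmadd above, vfmax (and the matching vfmin file)
+// lowers with no rounding-mode operand, since min/max never rounds; only
+// the masked forms further below append the `i64 3` tail/mask-agnostic
+// policy operand. Minimal sketches under the same zvfbfa RUN configuration:
+//   vbfloat16m4_t r = __riscv_vfmax(op1, op2, vl);       // unmasked
+//   vbfloat16m4_t s = __riscv_vfmax(mask, op1, op2, vl); // masked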
+vbfloat16m4_t test_vfmax_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmerge.c new file mode 100644 index 0000000000000..1dc290bf1a222 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmerge.c @@ -0,0 +1,69 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmerge_vfm_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) { + return __riscv_vfmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmerge_vfm_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) { + return __riscv_vfmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmerge_vfm_bf16m1(vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) { + return __riscv_vfmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmerge_vfm_bf16m2(vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) { + return __riscv_vfmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmerge_vfm_bf16m4(vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) { + return __riscv_vfmerge(op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmerge_vfm_bf16m8(vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) { + return __riscv_vfmerge(op1, op2, mask, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmin.c new file mode 100644 index 0000000000000..1564d1195ecef --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmin.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return 
__riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1_m( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t 
test_vfmin_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsac.c new file mode 100644 index 0000000000000..0384e7da29e08 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsac.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac(vd, vs1, vs2, vl); +} + 
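+// Illustrative usage sketch, not autogenerated and not exercised by any CHECK
+// lines: the overloaded __riscv_vfmsac tested in this file computes
+// vd[i] = vs1[i] * vs2[i] - vd[i] (the checks pass frm operand 7, the dynamic
+// rounding mode, and policy operand 3, tail/mask agnostic). A strip-mined
+// caller over plain __bf16 buffers could look like the hypothetical helper
+// below; the helper name, buffer layout, and LMUL=1 choice are assumptions,
+// as is the availability of the bf16 load/store intrinsics alongside zvfbfa.
+static inline void bf16_fmsac_buffers(__bf16 *acc, const __bf16 *a,
+                                      const __bf16 *b, size_t n) {
+  for (size_t i = 0, vl; i < n; i += vl) {
+    vl = __riscv_vsetvl_e16m1(n - i);        // elements handled this pass
+    vbfloat16m1_t vd = __riscv_vle16_v_bf16m1(acc + i, vl);
+    vbfloat16m1_t vs1 = __riscv_vle16_v_bf16m1(a + i, vl);
+    vbfloat16m1_t vs2 = __riscv_vle16_v_bf16m1(b + i, vl);
+    vd = __riscv_vfmsac(vd, vs1, vs2, vl);   // vd = vs1 * vs2 - vd
+    __riscv_vse16_v_bf16m1(acc + i, vd, vl);
+  }
+}
+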
+// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16m8_t test_vfmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1_m(vbool16_t mask, 
vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + 
return __riscv_vfmsac(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsub.c new file mode 100644 index 0000000000000..306f189b54c89 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmsub.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1( +// 
CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, 
vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return 
__riscv_vfmsub(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub(mask, vd, rs1, vs2, vl); +} + diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmul.c new file mode 100644 index 0000000000000..fffd83a12d36c --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmul.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16m1_t test_vfmul_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmul(mask, op1, op2, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmul_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vv_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+  return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmul_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmul_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+  return __riscv_vfmul(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmul_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfmul(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmv.c
new file mode 100644
index 0000000000000..f85378ff0f37a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfmv.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf4_bf16(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[SRC:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv1bf16(<vscale x 1 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16mf4_bf16(vbfloat16mf4_t src) {
+  return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16mf2_bf16(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv2bf16(<vscale x 2 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16mf2_bf16(vbfloat16mf2_t src) {
+  return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m1_bf16(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv4bf16(<vscale x 4 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m1_bf16(vbfloat16m1_t src) {
+  return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m2_bf16(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv8bf16(<vscale x 8 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m2_bf16(vbfloat16m2_t src) {
+  return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m4_bf16(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv16bf16(<vscale x 16 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m4_bf16(vbfloat16m4_t src) {
+  return __riscv_vfmv_f(src);
+}
+
+// CHECK-RV64-LABEL: define dso_local bfloat @test_vfmv_f_s_bf16m8_bf16(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[SRC:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call bfloat @llvm.riscv.vfmv.f.s.nxv32bf16(<vscale x 32 x bfloat> [[SRC]])
+// CHECK-RV64-NEXT: ret bfloat [[TMP0]]
+//
+__bf16 test_vfmv_f_s_bf16m8_bf16(vbfloat16m8_t src) {
+  return __riscv_vfmv_f(src);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt.c
new file mode 100644
index 0000000000000..fb635d61670c2
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt.c
@@ -0,0 +1,724 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> poison, <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfncvt_x(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> poison, <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64( poison, [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_m( +// CHECK-RV64-SAME: 
[[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfncvt_f_f_w_bf16m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( poison, [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm(vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm(vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm(vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm(vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64( poison, [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm(vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16(vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_m(vbool32_t vm, + vbfloat16mf2_t vs2, size_t vl) { + return 
__riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_x(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_m(vbool32_t vm, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_m(vbool16_t vm, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_xu(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_m(vbool64_t vm, vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_m(vbool32_t vm, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( poison, [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_m(vbool16_t vm, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
<vscale x 8 x bfloat> @test_vfncvt_f_f_w_bf16m2_rm_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_m(vbool8_t vm, vfloat32m4_t vs2,
+                                            size_t vl) {
+  return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_f_f_w_bf16m4_rm_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_m(vbool4_t vm, vfloat32m8_t vs2,
+                                            size_t vl) {
+  return __riscv_vfncvt_f_bf16(vm, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rod.c
new file mode 100644
index 0000000000000..1ad856df48c4b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rod.c
@@ -0,0 +1,113 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4(vfloat32mf2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2(vfloat32m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1(vfloat32m2_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2(vfloat32m4_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4(vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f_bf16(vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_m(vbool64_t vm, vfloat32mf2_t vs2,
+                                               size_t vl) {
+  return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfncvt_rod_f_f_w_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_m(vbool32_t vm, vfloat32m1_t vs2,
+                                               size_t vl) {
+  return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfncvt_rod_f_f_w_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_m(vbool16_t vm, vfloat32m2_t vs2,
+                                             size_t vl) {
+  return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfncvt_rod_f_f_w_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_m(vbool8_t vm, vfloat32m4_t vs2,
+                                             size_t vl) {
+  return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfncvt_rod_f_f_w_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_m(vbool4_t vm, vfloat32m8_t vs2,
+                                             size_t vl) {
+  return __riscv_vfncvt_rod_f_bf16(vm, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rtz.c
new file mode 100644
index 0000000000000..12d08934bd49d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfncvt_rtz.c
@@ -0,0 +1,267 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t 
test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8(vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4(vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4(vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_m(vbool64_t vm, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_m(vbool32_t vm, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_m(vbool64_t vm, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_m(vbool32_t vm, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_m(vbool16_t vm, + vbfloat16m1_t vs2, 
+ size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_m(vbool2_t vm, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu(vm, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmacc.c new file mode 100644 index 0000000000000..6f7928b763230 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmacc.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2( +// 
CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4(vbfloat16m4_t vd, 
vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2_m( +// 
CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmadd.c new file mode 100644 index 0000000000000..97d207040e5b2 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmadd.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfnmadd_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t 
test_vfnmadd_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfnmadd_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4_m( +// 
CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsac.c new file mode 100644 index 0000000000000..404b4f83c4f1f --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsac.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, rs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, vs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfnmsac_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsub.c new file mode 100644 index 0000000000000..3a520dd9af9bf --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfnmsub.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, 
rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub(mask, vd, vs1, vs2, vl); 
+} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local <vscale x 16 x bfloat> @test_vfnmsub_vf_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vv_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsub_vf_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfnmsub(mask, vd, rs1, vs2, vl);
+}
+
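The four negated fused-multiply operations covered by the preceding test files differ only in which source the destination overwrites and in the sign of the addend: vfnmacc computes vd = -(vs1 * vs2) - vd, vfnmsac computes vd = -(vs1 * vs2) + vd, vfnmadd computes vd = -(vd * vs1) - vs2, and vfnmsub computes vd = -(vd * vs1) + vs2. A minimal usage sketch (not part of the patch; the function name is hypothetical, and zvfbfa plus riscv_vector.h are assumed as in the RUN lines above) for r = -(a * b) - c using the overloaded bf16 vfnmacc:

// Hypothetical caller, not from the patch; assumes zvfbfa is enabled.
vbfloat16m1_t neg_mul_sub_bf16(vbfloat16m1_t a, vbfloat16m1_t b,
                               vbfloat16m1_t c, size_t vl) {
  // vfnmacc overwrites its first (accumulator) operand: r = -(a * b) - c.
  return __riscv_vfnmacc(c, a, b, vl);
}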
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrec7.c
new file mode 100644
index 0000000000000..462b6acf8eee6
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrec7.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) {
+  return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) {
+  return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1(vbfloat16m1_t op1, size_t vl) {
+  return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2(vbfloat16m2_t op1, size_t vl) {
+  return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4(vbfloat16m4_t op1, size_t vl) {
+  return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8(vbfloat16m8_t op1, size_t vl) {
+  return __riscv_vfrec7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrec7_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrec7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrec7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) {
+  return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrec7_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrec7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrec7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) {
+  return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrec7_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrec7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrec7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) {
+  return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrec7_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrec7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrec7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) {
+  return __riscv_vfrec7(mask, op1, vl);
+}
+
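vfrec7 returns an estimate of 1/x accurate to roughly 7 mantissa bits, close to bf16's 7 stored fraction bits, so the raw estimate is often adequate at this element width. Where more accuracy is wanted, one Newton-Raphson step x1 = x0 * (2 - d * x0) is the usual refinement. A sketch under stated assumptions (not part of this patch; it additionally assumes the bf16 overloads of vfmv.v.f and vfmul that the zvfbfa intrinsic work provides outside this test file):

// Hypothetical helper, not from the patch; __riscv_vfmv_v_f_bf16m1 and
// __riscv_vfmul for bf16 are assumed to exist under zvfbfa.
static inline vbfloat16m1_t recip_refined_bf16m1(vbfloat16m1_t d, size_t vl) {
  vbfloat16m1_t x0  = __riscv_vfrec7(d, vl);           // x0 ~= 1/d, ~7-bit accurate
  vbfloat16m1_t two = __riscv_vfmv_v_f_bf16m1((__bf16)2.0f, vl);
  vbfloat16m1_t t   = __riscv_vfnmsac(two, d, x0, vl); // t = 2 - d * x0
  return __riscv_vfmul(x0, t, vl);                     // x1 = x0 * t
}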
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrec7_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrec7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrec7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) {
+  return __riscv_vfrec7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrec7_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrec7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrec7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) {
+  return __riscv_vfrec7(mask, op1, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsqrt7.c
new file mode 100644
index 0000000000000..051fde7ef5472
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsqrt7.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4(vbfloat16mf4_t op1, size_t vl) {
+  return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2(vbfloat16mf2_t op1, size_t vl) {
+  return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1(vbfloat16m1_t op1, size_t vl) {
+  return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2(vbfloat16m2_t op1, size_t vl) {
+  return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4(vbfloat16m4_t op1, size_t vl) {
+  return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8(vbfloat16m8_t op1, size_t vl) {
+  return __riscv_vfrsqrt7(op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfrsqrt7_v_bf16mf4_m(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64(<vscale x 1 x bfloat> poison, <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, size_t vl) {
+  return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfrsqrt7_v_bf16mf2_m(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64(<vscale x 2 x bfloat> poison, <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, size_t vl) {
+  return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfrsqrt7_v_bf16m1_m(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64(<vscale x 4 x bfloat> poison, <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfrsqrt7_v_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, size_t vl) {
+  return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfrsqrt7_v_bf16m2_m(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64(<vscale x 8 x bfloat> poison, <vscale x 8 x bfloat> [[OP1]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfrsqrt7_v_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, size_t vl) {
+  return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfrsqrt7_v_bf16m4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64(<vscale x 16 x bfloat> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsqrt7_v_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, size_t vl) {
  return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsqrt7_v_bf16m8_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64(<vscale x 32 x bfloat> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, size_t vl) {
+  return __riscv_vfrsqrt7(mask, op1, vl);
+}
+
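Note the contrast with vfrec7 above: the vfrsqrt7 calls carry no rounding-mode operand (no `i64 7` frm argument in the checks), since the estimate does not depend on the dynamic rounding mode, while the masked non-policy overload remains tail/mask-agnostic (the trailing `i64 3`). A minimal masked-use sketch (not part of the patch; the names are hypothetical):

// Hypothetical caller: estimate 1/sqrt(x) only in active lanes. The
// non-policy overload is mask-agnostic, so inactive lanes must not be
// relied upon afterwards.
vbfloat16m1_t rsqrt_where_bf16m1(vbool16_t m, vbfloat16m1_t x, size_t vl) {
  return __riscv_vfrsqrt7(m, x, vl);
}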
__riscv_vfrsqrt7(mask, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsub.c new file mode 100644 index 0000000000000..04941823d1917 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfrsub.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv32bf16.bf16.i64( 
poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub(mask, op1, op2, vl); +} + diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnj.c new file mode 100644 index 0000000000000..615deddf4254f --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnj.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t 
test_vfsgnj_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
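The vv form of the overloaded __riscv_vfsgnj checked here copies the sign bit of op2 onto the magnitude of op1, lane by lane. A minimal strip-mined usage sketch follows; it assumes the bf16 unit-stride load/store and vsetvl intrinsics (__riscv_vsetvl_e16m1, __riscv_vle16_v_bf16m1, __riscv_vse16_v_bf16m1) are available alongside zvfbfa, and it is illustrative only, not part of the autogenerated assertions.

#include <riscv_vector.h>
#include <stddef.h>

// dst[i] = value with the magnitude of mag[i] and the sign of sgn[i].
void copysign_bf16(__bf16 *dst, const __bf16 *mag, const __bf16 *sgn,
                   size_t n) {
  for (size_t i = 0; i < n;) {
    size_t vl = __riscv_vsetvl_e16m1(n - i);        // lanes handled this pass
    vbfloat16m1_t vm = __riscv_vle16_v_bf16m1(mag + i, vl);
    vbfloat16m1_t vs = __riscv_vle16_v_bf16m1(sgn + i, vl);
    vbfloat16m1_t vr = __riscv_vfsgnj(vm, vs, vl);  // overloaded vv form under test
    __riscv_vse16_v_bf16m1(dst + i, vr, vl);
    i += vl;
  }
}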
define dso_local @test_vfsgnj_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat 
[[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjn.c new file mode 100644 index 0000000000000..a895e5f6ad6bd --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjn.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t 
test_vfsgnjn_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfsgnjn_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// 
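Applied to the same operand twice, the sign-injection forms give the usual negation and absolute-value idioms (the vfneg.v and vfabs.v pseudoinstructions are defined this way). A short sketch, assuming the overloaded bf16 forms behave exactly as the checks above encode:

#include <riscv_vector.h>
#include <stddef.h>

// -v: vfsgnjn injects the inverted sign of op2; with op2 == v this
// flips the sign bit of every active lane.
static inline vbfloat16m1_t neg_bf16m1(vbfloat16m1_t v, size_t vl) {
  return __riscv_vfsgnjn(v, v, vl);
}

// |v|: vfsgnjx XORs the two sign bits; with op2 == v the result sign
// is always 0, i.e. the absolute value.
static inline vbfloat16m1_t abs_bf16m1(vbfloat16m1_t v, size_t vl) {
  return __riscv_vfsgnjx(v, v, vl);
}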
CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjx.c new file mode 100644 index 0000000000000..0187516d00dc1 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsgnjx.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2(vbfloat16mf2_t op1, 
vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m8( +// CHECK-RV64-SAME: 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, 
size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1down.c new file mode 100644 index 0000000000000..4a7689492de92 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1down.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf4( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1down_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf2( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1down_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m1( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1down_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m2( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1down_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m4( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1down_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m8( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1down_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1down_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1down_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1down_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) { + return 
__riscv_vfslide1down(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1down_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down(mask, src, value, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1up.c new file mode 100644 index 0000000000000..f9f2dc0cf26b8 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfslide1up.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf4( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1up_vf_bf16mf4(vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf2( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1up_vf_bf16mf2(vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m1( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1up_vf_bf16m1(vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m2( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1up_vf_bf16m2(vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m4( +// CHECK-RV64-SAME: [[SRC:%.*]], 
bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1up_vf_bf16m4(vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m8( +// CHECK-RV64-SAME: [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1up_vf_bf16m8(vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1up_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1up_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], 
[[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1up_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(mask, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64( poison, [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1up_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up(mask, src, value, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsub.c new file mode 100644 index 0000000000000..ebcf6fa4669a3 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfsub.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vv_bf16mf2(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local
@test_vfsub_vv_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vv_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwadd.c new file mode 100644 index 0000000000000..124e7fb6de342 --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwadd.c @@ -0,0 +1,893 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vs2,
rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, 
i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfwadd_wf_bf16_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], 
[[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwcvt.c new file mode 100644 index 0000000000000..0399a639898a0 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwcvt.c @@ -0,0 +1,366 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4(vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2(vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t
test_vfwcvt_f_x_v_bf16m1(vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2(vint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4(vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8(vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4(vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2(vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1(vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2(vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t 
test_vfwcvt_f_xu_v_bf16m4(vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8(vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2(vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1(vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2(vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4(vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64( poison, [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8(vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f(vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_m(vbool64_t vm, vint8mf8_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_m(vbool32_t vm, vint8mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_m(vbool16_t vm, vint8mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_m(vbool8_t vm, vint8m1_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_m(vbool4_t vm, vint8m2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_m(vbool2_t vm, vint8m4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_m(vbool64_t vm, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_m(vbool32_t vm, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64( poison, 
[[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_m(vbool16_t vm, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_m(vbool8_t vm, vuint8m1_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_m(vbool4_t vm, vuint8m2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_m(vbool2_t vm, vuint8m4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_m(vbool64_t vm, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64( poison, [[VS2]], 
[[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VM]], i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f(vm, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmacc.c new file mode 100644 index 0000000000000..2eb7fc8cdeea8 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmacc.c @@ -0,0 +1,474 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]]
+// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_m( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); 
+} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, 
__RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmsac.c new file mode 100644 index 0000000000000..28f507619b428 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmsac.c @@ -0,0 +1,474 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64
3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, 
vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t 
vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + 
return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmsac_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + 
vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmul.c new file mode 100644 index 0000000000000..8de49fa586dcf --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwmul.c @@ -0,0 +1,451 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwmul(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vs2, rs1,
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwmul(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwmul(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 
0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmul_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm_m( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmacc.c new file mode 100644 index 0000000000000..783693106a217 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmacc.c @@ -0,0 +1,480 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, vl); +} + +//
CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat 
noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm_m( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, 
+ vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmsac.c new file mode 100644 index 0000000000000..ca936af7140e5 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwnmsac.c @@ -0,0 +1,480 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1(vfloat32m1_t vd, vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]],
[[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vd, + 
__bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwnmsac_vf_bf16mf4_f32mf2_rm(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm(vfloat32m4_t vd, __bf16 vs1, + 
vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 
3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwsub.c new file mode 100644 index 0000000000000..2e22e22e4b6fc --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vfwsub.c @@ -0,0 +1,893 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2(vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2(vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2(vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, 
[[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1(vfloat32m1_t vs2, vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1(vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2(vbfloat16m1_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2(vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64( poison, [[VS2]], 
bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2(vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4(vbfloat16m2_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4(vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4(vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8(vbfloat16m4_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 7, 
i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8(vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8(vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_m(vbool64_t vm, vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwsub_vf_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_m(vbool32_t vm, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_m(vbool16_t vm, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_m(vbool8_t vm, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_m(vbool4_t vm, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm(vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm(vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm(vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm(vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm(vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm(vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm(vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm(vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm(vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm(vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm(vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm(vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm(vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm(vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm(vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm(vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm(vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv(vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm( +// CHECK-RV64-SAME: [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm(vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf(vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_m(vbool64_t vm, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_m(vbool64_t vm, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_m(vbool32_t vm, vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_m(vbool32_t vm, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_m(vbool16_t vm, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfwsub_wv_bf16m1_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_m(vbool16_t vm, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_m(vbool8_t vm, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_m(vbool8_t vm, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], 
[[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_m(vbool4_t vm, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( poison, [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv(vm, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm_m( +// CHECK-RV64-SAME: [[VM:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( poison, [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 3) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_m(vbool4_t vm, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf(vm, vs2, rs1, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfeq.c new file mode 100644 index 0000000000000..29881c9ba5d5e --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfeq.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf4_b64( +// CHECK-RV64-SAME: 
[[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbool4_t test_vmfeq_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfeq(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfeq_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfeq_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfeq(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t 
test_vmfeq_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfeq(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfeq_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfeq(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfeq_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfeq(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfeq(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfge.c
new file mode 100644
index 0000000000000..b8083c5ebb8df
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfge.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfge(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local
@test_vmfge_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge(mask, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfgt.c new file mode 100644 index 0000000000000..b8749b3295a19 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfgt.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 
-triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool64_t test_vmfgt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool64_t test_vmfgt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool32_t test_vmfgt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool32_t test_vmfgt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m1_b16(
+// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool16_t test_vmfgt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m1_b16(
+// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool16_t test_vmfgt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m2_b8(
+// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool8_t test_vmfgt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+ return __riscv_vmfgt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL:
define dso_local @test_vmfgt_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfgt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfgt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfgt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfgt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfgt(mask, 
op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool4_t test_vmfgt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool2_t test_vmfgt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool2_t test_vmfgt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfgt(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfle.c
new file mode 100644
index 0000000000000..724608c89e8de
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfle.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool64_t test_vmfle_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool64_t test_vmfle_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + 
+// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, 
size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfle(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m8_b2_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64( poison, 
[[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool2_t test_vmfle_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmfle(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmflt.c
new file mode 100644
index 0000000000000..1b0b898d5053f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmflt.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1bf16.nxv1bf16.i64( [[OP1]], [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool64_t test_vmflt_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf4_b64(
+// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool64_t test_vmflt_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf2_b32(
+// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool32_t test_vmflt_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf2_b32(
+// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool32_t test_vmflt_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m1_b16(
+// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool16_t test_vmflt_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vmflt(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m1_b16(
+// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, 
vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmflt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmflt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmflt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmflt(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
<vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64(<vscale x 8 x i1> poison, <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
+//
+vbool8_t test_vmflt_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vv_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+  return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i1> @test_vmflt_vf_bf16m4_b4_m(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64(<vscale x 16 x i1> poison, <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
+//
+vbool4_t test_vmflt_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+  return __riscv_vmflt(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmflt_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmflt_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vmflt(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfne.c
new file mode 100644
index 0000000000000..672c1504d73eb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/non-policy/overloaded/vmfne.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i1> @test_vmfne_vv_bf16mf4_b64(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
+//
+vbool64_t
test_vmfne_vv_bf16mf4_b64(vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf4_b64( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv1bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_bf16mf4_b64(vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2bf16.nxv2bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_bf16mf2_b32(vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf2_b32( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv2bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_bf16mf2_b32(vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4bf16.nxv4bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_bf16m1_b16(vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m1_b16( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv4bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_bf16m1_b16(vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8bf16.nxv8bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vv_bf16m2_b8(vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m2_b8( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv8bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vf_bf16m2_b8(vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16bf16.nxv16bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vv_bf16m4_b4(vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m4_b4( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv16bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vf_bf16m4_b4(vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv32bf16.nxv32bf16.i64( [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vv_bf16m8_b2(vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m8_b2( +// CHECK-RV64-SAME: [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.nxv32bf16.bf16.i64( [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vf_bf16m8_b2(vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfne(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf4_b64_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_bf16mf4_b64_m(vbool64_t mask, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfne(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf2_b32_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_bf16mf2_b32_m(vbool32_t mask, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfne(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m1_b16_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_bf16m1_b16_m(vbool16_t mask, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vv_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfne(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m2_b8_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vf_bf16m2_b8_m(vbool8_t mask, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vv_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfne(mask, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m4_b4_m( +// CHECK-RV64-SAME: [[MASK:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64( poison, [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vf_bf16m4_b4_m(vbool4_t mask, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne(mask, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vv_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vv_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+  return __riscv_vmfne(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i1> @test_vmfne_vf_bf16m8_b2_m(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64(<vscale x 32 x i1> poison, <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
+//
+vbool2_t test_vmfne_vf_bf16m8_b2_m(vbool2_t mask, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vmfne(mask, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfadd.c
new file mode 100644
index 0000000000000..6d55279ae306f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfadd_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfadd_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfadd_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfadd_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL:
define dso_local @test_vfadd_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return 
__riscv_vfadd_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], 
[[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, 
size_t vl) { + return __riscv_vfadd_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], 
<vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfadd_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+  return __riscv_vfadd_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfadd_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+  return __riscv_vfadd_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfadd_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfclass.c
new file mode 100644
index 0000000000000..8e6946de398b0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfclass.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define
dso_local @test_vfclass_v_bf16mf4_u16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv1bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tu(vuint16mf4_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16mf4_u16mf4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf2_u16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv2bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tu(vuint16mf2_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16mf2_u16mf2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m1_u16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv4bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfclass_v_bf16m1_u16m1_tu(vuint16m1_t vd, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfclass_v_bf16m1_u16m1_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m2_u16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv8bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfclass_v_bf16m2_u16m2_tu(vuint16m2_t vd, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfclass_v_bf16m2_u16m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m4_u16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv16bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfclass_v_bf16m4_u16m4_tu(vuint16m4_t vd, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfclass_v_bf16m4_u16m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m8_u16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.nxv32bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfclass_v_bf16m8_u16m8_tu(vuint16m8_t vd, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfclass_v_bf16m8_u16m8_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf4_u16mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16mf4_u16mf4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf2_u16mf2_tum( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16mf2_u16mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m1_u16m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfclass_v_bf16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m1_u16m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m2_u16m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfclass_v_bf16m2_u16m2_tum(vbool8_t vm, vuint16m2_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m2_u16m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m4_u16m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfclass_v_bf16m4_u16m4_tum(vbool4_t vm, vuint16m4_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m4_u16m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m8_u16m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfclass_v_bf16m8_u16m8_tum(vbool2_t vm, vuint16m8_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m8_u16m8_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf4_u16mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16mf4_u16mf4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf2_u16mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16mf2_u16mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m1_u16m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m1_t test_vfclass_v_bf16m1_u16m1_tumu(vbool16_t vm, vuint16m1_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m1_u16m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m2_u16m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m2_t test_vfclass_v_bf16m2_u16m2_tumu(vbool8_t vm, vuint16m2_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m2_u16m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m4_u16m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m4_t test_vfclass_v_bf16m4_u16m4_tumu(vbool4_t vm, vuint16m4_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m4_u16m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m8_u16m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16m8_t test_vfclass_v_bf16m8_u16m8_tumu(vbool2_t vm, vuint16m8_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16m8_u16m8_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf4_u16mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16mf4_u16mf4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16mf2_u16mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfclass.mask.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfclass_v_bf16mf2_u16mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfclass_v_bf16m1_u16m1_mu( +// CHECK-RV64-SAME: 
<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+                                           vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfclass_v_bf16m1_u16m1_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_mu(vbool8_t vm, vuint16m2_t vd,
+                                           vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfclass_v_bf16m2_u16m2_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_mu(vbool4_t vm, vuint16m4_t vd,
+                                           vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfclass_v_bf16m4_u16m4_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_mu(vbool2_t vm, vuint16m8_t vd,
+                                           vbfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfclass_v_bf16m8_u16m8_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmacc.c
new file mode 100644
index 0000000000000..2d4e481b1f8bb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmacc.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmacc_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]]
{ +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m1_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t 
test_vfmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m8_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m1_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m1_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return 
__riscv_vfmacc_vf_bf16m2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m8_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m8_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tumu(vbool64_t mask, 
vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], 
[[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m1_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m1_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vv_bf16m8_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_vf_bf16m8_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmadd.c new file mode 100644 index 0000000000000..511e073e10143 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmadd.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local
@test_vfmadd_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16mf4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16mf4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m1_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64( [[VD]], 
[[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m8_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return 
__riscv_vfmadd_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m1_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m1_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t 
vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m8_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m8_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t 
test_vfmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], 
[[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m1_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m1_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vv_bf16m8_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_vf_bf16m8_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmax.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmax.c new file mode 100644 index 0000000000000..f3698d41aff34 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmax.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1_tu( +// CHECK-RV64-SAME:
[[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32bf16.bf16.i64( 
[[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfmax_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1_tumu(vbool16_t mask, 
vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1_mu( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return 
__riscv_vfmax_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+  return __riscv_vfmax_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m8_t test_vfmax_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfmax_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmerge.c
new file mode 100644
index 0000000000000..bcaf2cb8817e4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmerge.c
@@ -0,0 +1,69 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16mf4_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf4_t test_vfmerge_vfm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) {
+  return __riscv_vfmerge_vfm_bf16mf4_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16mf2_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf2_t test_vfmerge_vfm_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) {
+  return __riscv_vfmerge_vfm_bf16mf2_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m1_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m1_t test_vfmerge_vfm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) {
+  return __riscv_vfmerge_vfm_bf16m1_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m2_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m2_t test_vfmerge_vfm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) {
+  return __riscv_vfmerge_vfm_bf16m2_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m4_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m4_t test_vfmerge_vfm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) {
+  return __riscv_vfmerge_vfm_bf16m4_tu(maskedoff, op1, op2, mask, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m8_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m8_t test_vfmerge_vfm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) {
+  return __riscv_vfmerge_vfm_bf16m8_tu(maskedoff, op1, op2, mask, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmin.c
new file mode 100644
index 0000000000000..911f8792e4436
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmin.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf4_t test_vfmin_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfmin_vv_bf16mf4_tu(maskedoff, op1, op2,
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], 
[[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t 
test_vfmin_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_vf_bf16m8_mu(mask, 
maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsac.c
new file mode 100644
index 0000000000000..9575ad337de8c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsac.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsac_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsac_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsac_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsac_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m1_t test_vfmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmsac_vv_bf16m1_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m1_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmsac.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m8_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m1_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m1_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m8_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m8_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1_tumu( +// CHECK-RV64-SAME: 
[[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfmsac_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return 
__riscv_vfmsac_vv_bf16m1_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m1_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vv_bf16m4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_vf_bf16m4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t 
vs2, size_t vl) {
+  return __riscv_vfmsac_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m8_t test_vfmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmsac_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsub.c
new file mode 100644
index 0000000000000..8e382f710ab5a
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf2_t 
test_vfmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m1_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m8_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m1_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m1_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m8_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m8_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return 
__riscv_vfmsub_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vf_bf16mf2_mu(vbool32_t mask, 
vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m1_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m1_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_vf_bf16m2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_vv_bf16m4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t 
test_vfmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m8_t test_vfmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmsub_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmul.c
new file mode 100644
index 0000000000000..716f056f3e12c
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmul.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfmul_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfmul_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t 
test_vfmul_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t 
test_vfmul_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmv.c new file mode 100644 index 0000000000000..069ee6a2a5df3 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfmv.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by 
utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_v_f_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.v.f.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_v_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) {
+  return __riscv_vfmv_v_f_bf16mf4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmv_v_f_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.v.f.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_v_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) {
+  return __riscv_vfmv_v_f_bf16mf2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmv_v_f_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.v.f.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_v_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) {
+  return __riscv_vfmv_v_f_bf16m1_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfmv_v_f_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.v.f.nxv8bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_v_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) {
+  return __riscv_vfmv_v_f_bf16m2_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfmv_v_f_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfmv.v.f.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfmv_v_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) {
+  return __riscv_vfmv_v_f_bf16m4_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfmv_v_f_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfmv.v.f.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_v_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) {
+  return __riscv_vfmv_v_f_bf16m8_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmv_s_f_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], bfloat noundef
[[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmv_s_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16mf4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmv_s_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16mf2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmv_s_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16m1_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmv_s_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16m2_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv16bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmv_s_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16m4_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv32bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmv_s_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_f_bf16m8_tu(maskedoff, src, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt.c new file mode 100644 index 0000000000000..36d4fc332499f --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt.c @@ -0,0 +1,1577 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// 
RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd, vbfloat16mf4_t vs2,
+                                              size_t vl) {
+  return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd, vbfloat16mf2_t vs2,
+                                              size_t vl) {
+  return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_x_f_w_bf16m1_i8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd, vbfloat16m1_t vs2,
+                                             size_t vl) {
+  return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_x_f_w_bf16m2_i8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+                                           size_t vl) {
+  return __riscv_vfncvt_x_f_w_bf16m2_i8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_x_f_w_bf16m4_i8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+                                           size_t vl) {
+  return __riscv_vfncvt_x_f_w_bf16m4_i8m2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vfncvt_x_f_w_bf16m8_i8m4_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2,
+                                           size_t vl) {
+  return __riscv_vfncvt_x_f_w_bf16m8_i8m4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8>
[[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tu(vbfloat16m1_t vd, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tu(vbfloat16m2_t vd, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tu(vbfloat16m4_t vd, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfncvt_x_f_w_bf16m2_i8m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m2_i8m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m2_i8m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return 
__riscv_vfncvt_x_f_w_bf16m4_i8m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfncvt_xu_f_w_bf16m8_u8m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m2_i8m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return 
__riscv_vfncvt_f_f_w_bf16mf4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(vint8m1_t vd, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(vint8m2_t vd, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(vint8m4_t vd, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return 
__riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tu(vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tu(vbfloat16m1_t vd, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tu(vbfloat16m2_t vd, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tu(vbfloat16m4_t vd, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_rm_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 
[[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(vbool64_t vm, + vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(vm, vd, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(vbool32_t vm, + vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(vm, vd, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tum(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tum(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tum(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_rm_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(vm, vd, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(vm, vd, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(vbool64_t vm, + vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(vm, vd, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(vbool32_t vm, + vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(vm, vd, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(vbool16_t vm, + vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(vm, vd, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tumu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tumu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tumu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tumu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tumu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_rm_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(vbool64_t vm, vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(vbool32_t vm, vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], 
[[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_mu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_mu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16mf2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m1_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m2_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_f_w_bf16m4_rm_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rod.c new file mode 100644 index 0000000000000..84066846e188c --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rod.c @@ -0,0 +1,233 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck 
--check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tu(vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m1_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tu(vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tu(vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tum(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +//
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tum(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tumu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tumu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, 
size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_f_w_bf16m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4_mu( +// 
CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+                                              vfloat32m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rod_f_f_w_bf16m4_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rtz.c
new file mode 100644
index 0000000000000..4644eff12e210
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfncvt_rtz.c
@@ -0,0 +1,572 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd,
+                                                  vbfloat16mf4_t vs2,
+                                                  size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd,
+                                                  vbfloat16mf2_t vs2,
+                                                  size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd,
+                                                 vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+                                               size_t vl) {
+  return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm, + vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm, + vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm, + vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm, + vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm, + vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm, + vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm, + vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm, + vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t 
test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd,
+                                      vbfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmacc.c
new file mode 100644
index 0000000000000..93fd6ba2faff7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmacc.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmacc_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmacc_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfnmacc_vf_bf16mf2_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfnmacc_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfnmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m1_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8_tu( +// 
CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m8_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m1_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfnmacc_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m1_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return 
__riscv_vfnmacc_vv_bf16m8_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m8_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t 
test_vfnmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], 
[[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m1_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m1_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vv_bf16m4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_vf_bf16m4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmacc_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmacc_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmadd.c
new file mode 100644
index 0000000000000..d7e6b8225d8c0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16mf4_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16mf2_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfnmadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat>
[[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m1_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 
[[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m8_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m1_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m1_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat 
[[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m8_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m8_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef 
[[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfnmadd_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m1_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m1_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_vf_bf16m2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_vv_bf16m4_mu(mask, vd, vs1, 
vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmadd_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmadd_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsac.c
new file mode 100644
index 0000000000000..e0c289d23c17f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsac.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16mf4_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsac_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m1_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4_tu( +// 
CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m8_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m1_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m1_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m8_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m8_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfnmsac_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t 
vl) { + return __riscv_vfnmsac_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t 
test_vfnmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m1_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_vf_bf16m1_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_vv_bf16m2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 
[[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfnmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m2_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[VD]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m4_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfnmsac_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfnmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m4_mu(mask, vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], <vscale x 32 x bfloat> [[VS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[VD]], <vscale x 32 x bfloat> [[VS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vv_bf16m8_mu(mask, vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfnmsac_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfnmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfnmsac_vf_bf16m8_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsub.c
new file mode 100644
index 0000000000000..05ccda36e9032
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfnmsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfnmsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef
[[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m1_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m2_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m4_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m8_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf4_tum(mask, vd, vs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m1_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m1_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, 
size_t vl) { + return __riscv_vfnmsub_vv_bf16m2_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m2_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m4_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m4_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m8_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m8_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t 
test_vfnmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m1_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m1_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64( 
[[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m2_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m2_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m4_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m4_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m8_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m8_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16mf2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16mf2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m1_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m1_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m2_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m2_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m4_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m4_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vv_bf16m8_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_vf_bf16m8_mu(mask, vd, rs1, vs2, vl); +} + diff --git 
a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrec7.c new file mode 100644 index 0000000000000..3123692b45a68 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrec7.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrec7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16mf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrec7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16mf2_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrec7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m1_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrec7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m2_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrec7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vfrec7.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrec7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m8_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrec7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16mf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrec7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16mf2_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrec7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m1_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrec7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m2_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrec7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrec7_v_bf16m8_tum(vbool2_t mask, 
vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m8_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrec7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16mf4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrec7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16mf2_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrec7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m1_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrec7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m2_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrec7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrec7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m8_tumu(mask, maskedoff, op1, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrec7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16mf4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrec7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16mf2_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrec7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m1_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrec7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m2_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrec7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrec7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrec7_v_bf16m8_mu(mask, maskedoff, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsqrt7.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsqrt7.c new file mode 100644 index 0000000000000..8436f0ea488b2 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsqrt7.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16mf4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16mf2_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m1_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m2_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m4_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +//
+vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m8_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16mf4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16mf2_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m1_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m2_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m4_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m8_tum(mask, maskedoff, op1, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16mf4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16mf2_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m1_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m2_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m4_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m8_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16mf4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16mf2_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsqrt7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m1_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsqrt7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m2_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsqrt7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m4_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsqrt7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrsqrt7_v_bf16m8_mu(mask, maskedoff, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsub.c new file mode 100644 index 0000000000000..7dd2bb6cc502d --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfrsub.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]])
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m4_tum(mask, 
maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], 
[[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4_mu( +// CHECK-RV64-SAME: 
<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfrsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfrsub_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfrsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfrsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfrsub_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnj.c
new file mode 100644
index 0000000000000..b39a0be2238a9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnj.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfsgnj_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnj_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsgnj_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnj_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t
op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m4_tu( +// CHECK-RV64-SAME: 
[[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2_tum( +// 
CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, 
vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64( 
[[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, 
size_t vl) { + return __riscv_vfsgnj_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 
[[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnj_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsgnj_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+  return __riscv_vfsgnj_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnj_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnj_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsgnj_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+  return __riscv_vfsgnj_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnj_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnj_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsgnj_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjn.c
new file mode 100644
index 0000000000000..7542e7866c888
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjn.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat>
@test_vfsgnjn_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], 
[[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, 
size_t vl) { + return __riscv_vfsgnjn_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfsgnjn_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t 
test_vfsgnjn_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+  return __riscv_vfsgnjn_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsgnjn_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjx.c
new file mode 100644
index 0000000000000..104149e1b2fa4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsgnjx.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfsgnjx_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsgnjx_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+  return __riscv_vfsgnjx_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsgnjx_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16mf2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m4_tum(mask, maskedoff, op1, op2, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return 
__riscv_vfsgnjx_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 
[[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsgnjx_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+  return __riscv_vfsgnjx_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsgnjx_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+  return __riscv_vfsgnjx_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsgnjx_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1down.c
new file mode 100644
index 0000000000000..228dc1cd064a8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1down.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL:
define dso_local @test_vfslide1down_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16mf4_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16mf2_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1down_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m1_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1down_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m2_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1down_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m4_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1down_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return 
__riscv_vfslide1down_vf_bf16m8_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16mf4_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16mf2_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1down_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m1_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1down_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m2_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1down_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m4_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1down_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m8_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16mf4_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16mf2_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1down_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m1_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1down_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m2_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1down_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m4_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1down_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m8_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16mf4_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16mf2_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1down_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m1_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1down_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_vf_bf16m2_mu(mask, maskedoff, src, value, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1down_vf_bf16m4_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1down_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1down_vf_bf16m8_mu(mask, maskedoff, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1up.c
new file mode 100644
index 0000000000000..9e6ff2b16f77d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfslide1up.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1up_vf_bf16mf4_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1up_vf_bf16mf2_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat>
@llvm.riscv.vfslide1up.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_tum(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_tumu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1up_vf_bf16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf4_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1up_vf_bf16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16mf2_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1up_vf_bf16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1up_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m1_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1up_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1up_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m2_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1up_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1up_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m4_mu(mask, maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1up_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[SRC]], bfloat [[VALUE]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfslide1up_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) {
+ return __riscv_vfslide1up_vf_bf16m8_mu(mask, maskedoff, src, value, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsub.c
new file mode 100644
index 0000000000000..b6fd94ece20cd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfsub.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsub_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf4_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], <vscale x 2 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], <vscale x 2 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfsub_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfsub.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16mf2_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], <vscale x 4 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], <vscale x 4 x bfloat> [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfsub_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m1_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfsub_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfsub.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[OP1]], bfloat [[OP2]],
i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m1_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m2_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m4_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return 
__riscv_vfsub_vf_bf16m8_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16mf4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16mf2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], 
[[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m1_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m2_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m4_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m8_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16mf4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16mf2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return 
__riscv_vfsub_vv_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m1_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m2_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m4_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], 
i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m8_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16mf4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16mf2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m1_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_vf_bf16m2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub_vv_bf16m4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return 
__riscv_vfsub_vf_bf16m4_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+ return __riscv_vfsub_vv_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsub_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfsub_vf_bf16m8_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwadd.c
new file mode 100644
index 0000000000000..4bee376cfe0fb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwadd.c
@@ -0,0 +1,2007 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2,
+ vbfloat16mf4_t vs1, size_t vl) {
+ return __riscv_vfwadd_vv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+ vbfloat16mf4_t vs2, __bf16 rs1,
+ size_t vl) {
+ return __riscv_vfwadd_vf_bf16mf4_f32mf2_tu(vd, vs2, rs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwadd_wv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x bfloat> [[VS1]], i64
7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + 
return __riscv_vfwadd_vv_bf16m1_f32m2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16m1_f32m2_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef 
[[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_tum( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m1_f32m2_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return 
__riscv_vfwadd_vv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m1_f32m2_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwadd_wv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, 
__bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], 
[[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m1_f32m2_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_mu( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return 
__riscv_vfwadd_wv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, 
size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m1_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { 
+ return __riscv_vfwadd_wv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return 
__riscv_vfwadd_vv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwadd_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m1_f32m2_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 
[[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm_tum( 
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat 
[[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( 
[[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m1_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfwadd_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m2_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 
1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_bf16m4_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_bf16_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwcvt.c new file mode 100644 index 0000000000000..9151319fcfb17 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwcvt.c @@ -0,0 +1,765 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tu(vbfloat16mf4_t vd, vint8mf8_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tu(vbfloat16mf2_t vd, vint8mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tu(vbfloat16m1_t vd, vint8mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m1_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tu(vbfloat16m2_t vd, vint8m1_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tu(vbfloat16m4_t vd, vint8m2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tu(vbfloat16m8_t vd, vint8m4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m8_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tu(vbfloat16mf4_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tu(vbfloat16mf2_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tu(vbfloat16m1_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m1_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tu(vbfloat16m2_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tu(vbfloat16m4_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tu(vbfloat16m8_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m8_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], i64 
[[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m8_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwcvt_f_xu_v_bf16mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m8_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t 
test_vfwcvt_f_x_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + vint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m8_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1_tumu( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m8_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + vint8m1_t 
vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_x_v_bf16m8_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_xu_v_bf16m8_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16mf2_f32m1_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m1_f32m2_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m2_f32m4_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_f_v_bf16m4_f32m8_mu(vm, vd, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmacc.c new file mode 100644 index 0000000000000..f67b1001a5fcb --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmacc.c @@ -0,0 +1,1017 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], 
[[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], 
[[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + 
vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmacc_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, 
vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 
0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t 
vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat 
noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmsac.c new file mode 100644 index 0000000000000..6d78c74e14694 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmsac.c @@ -0,0 +1,1017 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) 
{ + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + 
__bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfwmsac_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmsac_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return 
__riscv_vfwmsac_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return 
__riscv_vfwmsac_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmsac_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmul.c new file mode 100644 index 0000000000000..9fcfe818be71f --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwmul.c @@ -0,0 +1,1015 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: 
[[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + 
vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], 
[[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], 
[[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8_mu(vm, vd, vs2, 
rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return 
__riscv_vfwmul_vf_bf16m1_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm_tum( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + 
+// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m1_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m2_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_vv_bf16m4_f32m8_rm_mu(vm, 
vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_vf_bf16m4_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmacc.c new file mode 100644 index 0000000000000..73cc82219b201 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmacc.c @@ -0,0 +1,1034 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64( 
[[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return 
__riscv_vfwnmacc_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t 
test_vfwnmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t 
test_vfwnmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, + vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, + vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t 
vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: 
[[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+                                                 vbfloat16m2_t vs1,
+                                                 vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+                                                __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+                                                 __bf16 vs1, vbfloat16m2_t vs2,
+                                                 size_t vl) {
+  return __riscv_vfwnmacc_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2,
+                                                __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+                                                 vbfloat16m4_t vs1,
+                                                 vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwnmacc_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+                                                __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+                                                 __bf16 vs1, vbfloat16m4_t vs2,
+                                                 size_t vl) {
+  return __riscv_vfwnmacc_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2,
+                                                __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmsac.c
new file mode 100644
index 0000000000000..6133230e5cd84
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwnmsac.c
@@ -0,0 +1,1034 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
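+// The policy suffix on each intrinsic determines the trailing operands of the
+// generated IR call. Editorial summary (not autogenerated), assuming LLVM's
+// usual RVV encodings: policy bit 0 = tail agnostic, bit 1 = mask agnostic,
+// and frm value 7 = dynamic (use the current frm CSR):
+//
+//   _tu    tail undisturbed, mask agnostic    -> policy operand i64 2
+//   _tum   tail undisturbed, mask agnostic    -> policy operand i64 2
+//   _tumu  tail undisturbed, mask undisturbed -> policy operand i64 0
+//   _mu    tail agnostic,    mask undisturbed -> policy operand i64 1
+//
+// Plain intrinsics pass frm = i64 7; the _rm variants pass the explicit mode,
+// so __RISCV_FRM_RNE in the tests below lowers to i64 0. A minimal usage
+// sketch (illustrative only; widening_nmsac is a hypothetical caller):
+//
+//   vfloat32mf2_t widening_nmsac(vfloat32mf2_t acc, vbfloat16mf4_t a,
+//                                vbfloat16mf4_t b, size_t vl) {
+//     // acc[i] = -(a[i] * b[i]) + acc[i], widening bf16 sources to f32;
+//     // tail elements of acc are left undisturbed by the _tu policy.
+//     return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_tu(acc, a, b, vl);
+//   }
+
+// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call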
@llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m2_f32m4_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return 
__riscv_vfwnmsac_vf_bf16m2_f32m4_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m2_f32m4_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwnmsac_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t 
test_vfwnmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m2_f32m4_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm_tu( +// 
CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_tu(vd, vs1, vs2, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum( + vbool64_t vm, vfloat32mf2_t vd, __bf16 
vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], 
bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_tum(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, 
i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, + vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, + vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, + vbfloat16m2_t vs2, + size_t vl) { + return 
__riscv_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m1_f32m2_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m2_f32m4_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t 
test_vfwnmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_vv_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_vf_bf16m4_f32m8_rm_mu(vm, vd, vs1, vs2, + __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwsub.c new file mode 100644 index 0000000000000..9d9b0b0aa2f3c --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vfwsub.c @@ -0,0 +1,2007 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_tu(vd, vs2, rs1, vl); +} + 
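+// In the CHECK lines of this file, the i64 immediate before [[VL]] is the rounding-mode operand (7 selects the dynamic frm CSR; the _rm_ tests later in the file pass 0, i.e. __RISCV_FRM_RNE), and the final i64 in the masked calls is the tail/mask policy immediate (2 = tum, 0 = tumu, 1 = mu). + 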
+// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwsub_wv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + 
vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, 
i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwsub_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + 
vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], 
i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwsub_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + 
size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8_rm_tu(vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8_rm_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t 
vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8_rm_tum(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8_rm_tum(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return 
__riscv_vfwsub_wf_bf16_f32m8_rm_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + 
vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8_rm_tumu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m4_f32m8_rm_tumu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m8_rm_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + 
vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_bf16mf4_f32mf2_rm_mu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32mf2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16mf2_f32m1_rm_mu(vm, vd, vs2, rs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16mf2_f32m1_rm_mu(vm, vd, vs2, vs1, + __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m1_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m1_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m1_f32m2_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m2_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, 
size_t vl) { + return __riscv_vfwsub_vv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m2_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_bf16m2_f32m4_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_bf16_f32m4_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_bf16m4_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, + vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+                                               vfloat32m8_t vs2,
+                                               vbfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_bf16m4_f32m8_rm_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE,
+                                              vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm_mu(
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+                                             vfloat32m8_t vs2, __bf16 rs1,
+                                             size_t vl) {
+  return __riscv_vfwsub_wf_bf16_f32m8_rm_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE,
+                                            vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfeq.c
new file mode 100644
index 0000000000000..b96aae5615db8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfeq.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+  return __riscv_vmfeq_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vmfeq_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t 
vl) {
+  return __riscv_vmfeq_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vmfeq_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+  return __riscv_vmfeq_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vmfeq_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+  return __riscv_vmfeq_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vmfeq_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret [[TMP0]]
+//
+vbool4_t 
test_vmfeq_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfeq_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfeq_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfeq_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfeq_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfge.c new file mode 100644 index 0000000000000..47d0427abf99b --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfge.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfge_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfge_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfge_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vmfge_vf_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfge_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfgt.c new file mode 100644 index 0000000000000..0a0ead22361e9 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfgt.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple 
riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfgt_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfle.c new file mode 100644 index 0000000000000..27ddefec8c9c0 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfle.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, 
vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfle_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmflt.c new file mode 100644 index 0000000000000..d5f4f777580d3 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmflt.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + 
+// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmflt_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfne.c new file mode 100644 index 0000000000000..c2df9474acc72 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/non-overloaded/vmfne.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 
[[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16mf4_b64_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16mf2_b32_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m1_b16_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m2_b8_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m4_b4_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfne_vv_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_vf_bf16m8_b2_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfadd.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfadd.c new file mode 100644 index 0000000000000..2bd3b3995c6c8 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfadd.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], 
[[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], 
i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + 
return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vbfloat16m8_t test_vfadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfadd.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfadd_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfadd.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfadd_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfclass.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfclass.c
new file mode 100644
index 0000000000000..e2a993aca7069
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfclass.c
@@ -0,0 +1,272 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tu(vuint16mf4_t vd,
+                                              vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tu(vuint16mf2_t vd,
+                                              vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tu(vuint16m1_t vd, vbfloat16m1_t vs2,
+                                           size_t vl) {
+  return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tu(vuint16m2_t vd, vbfloat16m2_t vs2,
+                                           size_t vl) {
+  return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tu(vuint16m4_t vd, vbfloat16m4_t vs2,
+                                           size_t vl) {
+  return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tu(vuint16m8_t vd, vbfloat16m8_t vs2,
+                                           size_t vl) {
+  return __riscv_vfclass_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tum(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                               vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tum(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                               vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tum(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+                                            vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tum(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+                                            vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tum(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+                                            vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tum(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tum(vbool2_t vm, vuint16m8_t vd,
+                                            vbfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfclass_tum(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_tumu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                                vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_tumu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                                vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_tumu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+                                             vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_tumu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+                                             vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_tumu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+                                             vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_tumu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+                                             vbfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfclass_tumu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vfclass_v_bf16mf4_u16mf4_mu(
+// CHECK-RV64-SAME: <vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1bf16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x bfloat> [[VS2]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vfclass_v_bf16mf4_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                              vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vfclass_v_bf16mf2_u16mf2_mu(
+// CHECK-RV64-SAME: <vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2bf16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x bfloat> [[VS2]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vfclass_v_bf16mf2_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                              vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vfclass_v_bf16m1_u16m1_mu(
+// CHECK-RV64-SAME: <vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4bf16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x bfloat> [[VS2]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vfclass_v_bf16m1_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+                                           vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vfclass_v_bf16m2_u16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8bf16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vfclass_v_bf16m2_u16m2_mu(vbool8_t vm, vuint16m2_t vd,
+                                           vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vfclass_v_bf16m4_u16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16bf16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vfclass_v_bf16m4_u16m4_mu(vbool4_t vm, vuint16m4_t vd,
+                                           vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vfclass_v_bf16m8_u16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32bf16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x bfloat> [[VS2]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vfclass_v_bf16m8_u16m8_mu(vbool2_t vm, vuint16m8_t vd,
+                                           vbfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfclass_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmacc.c
new file mode 100644
index 0000000000000..eb7427107c942
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmacc.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmacc_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmacc.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmacc_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmacc.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmacc_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], bfloat
noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8_tu(vbfloat16m8_t 
vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t 
vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, 
vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1_tumu(vbool16_t mask, 
vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8_tumu(vbool2_t mask, 
vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmacc_vf_bf16m1_mu(vbool16_t mask, 
vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmacc_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, 
vbfloat16m8_t vs2, size_t vl) {
+  return __riscv_vfmacc_mu(mask, vd, rs1, vs2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmadd.c
new file mode 100644
index 0000000000000..68d490d04ff86
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmadd.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfmadd_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+  return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16.i64(<vscale x 2 x bfloat> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfmadd_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[VD:%.*]], bfloat noundef [[RS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[VD]], bfloat [[RS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfmadd_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfmadd_vv_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16.i64(<vscale x 4 x bfloat> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfmadd_tu(vd, vs1, vs2, vl);
+}
+
+//
CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 
[[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 
0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmadd_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16m8_t test_vfmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmadd_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmax.c new file mode 100644 index 0000000000000..5f682e80b61b5 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmax.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t 
test_vfmax_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], 
bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1_tum( +// 
CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, 
op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1_tumu(vbool16_t mask, 
vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 
[[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmax_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmax_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmax_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmax_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmax_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmax.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmax_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmax_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmerge.c new file mode 100644 index 0000000000000..9593ad5b5a592 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmerge.c @@ -0,0 +1,69 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmerge_vfm_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, vbool64_t mask, size_t vl) { + return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmerge_vfm_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, vbool32_t mask, size_t vl) { + return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmerge_vfm_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, vbool16_t mask, size_t vl) { + 
return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmerge_vfm_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, vbool8_t mask, size_t vl) { + return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmerge_vfm_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, vbool4_t mask, size_t vl) { + return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmerge_vfm_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmerge.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmerge_vfm_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, vbool2_t mask, size_t vl) { + return __riscv_vfmerge_tu(maskedoff, op1, op2, mask, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmin.c new file mode 100644 index 0000000000000..f3ef3c3614027 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmin.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4_tu( 
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} 
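(Editor's note on the policy overloads exercised throughout these tests: `_tu` is tail-undisturbed and unmasked, `_tum` is tail-undisturbed/mask-agnostic, `_tumu` is tail-undisturbed/mask-undisturbed, and `_mu` is tail-agnostic/mask-undisturbed. The trailing immediate on the masked intrinsic calls in the checks encodes this, with bit 0 meaning tail-agnostic and bit 1 meaning mask-agnostic: `_tum` lowers with `i64 2`, `_tumu` with `i64 0`, and `_mu` with `i64 1`, exactly as the assertions above show. The FP multiply-accumulate tests later in this patch, such as vfmsac and vfmsub, additionally carry a rounding-mode operand, where `i64 7` selects the dynamic rounding mode from the frm CSR. Below is a minimal caller-side sketch of one of these overloads, assuming a zvfbfa-enabled toolchain; the function `min_masked` and its argument names are illustrative and not part of the patch:

#include <riscv_vector.h>

// Illustrative only: elementwise bf16 min where inactive lanes (mask == 0)
// and tail lanes (index >= vl) keep their previous value from dst.
static vbfloat16m1_t min_masked(vbool16_t mask, vbfloat16m1_t dst,
                                vbfloat16m1_t a, vbfloat16m1_t b, size_t vl) {
  // _tumu: tail-undisturbed, mask-undisturbed; per the checks above this
  // lowers to @llvm.riscv.vfmin.mask...(dst, a, b, mask, vl, i64 0).
  return __riscv_vfmin_tumu(mask, dst, a, b, vl);
}

The same calling pattern applies at every LMUL; only the vector and mask types change, e.g. vbfloat16m2_t with vbool8_t.)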
+ +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t 
maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmin_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmin_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmin_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfmin.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmin_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmin_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmin.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmin_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmin_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsac.c new file mode 100644 index 0000000000000..0587c57af2b06 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsac.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local
@test_vfmsac_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4_tum( +// 
CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m2_tum( +// 
CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf4_tumu( +// 
CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfmsac_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define 
dso_local @test_vfmsac_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfmsac_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsac_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsac_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsub.c new file mode 100644 index 0000000000000..2ad26f8b24e12 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmsub.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: 
-emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + 
return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + 
return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return 
__riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t 
vl) { + return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, 
size_t vl) { + return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + 
return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmsub_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfmsub_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmul.c new file mode 100644 index 0000000000000..d1e726a9d63ea --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmul.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature 
+v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf4_t test_vfmul_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf2_t test_vfmul_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m1_t test_vfmul_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_tu(maskedoff, op1, op2, vl);
+}
+
+//
CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2_tum( +// 
CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + 
return __riscv_vfmul_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmul_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmul_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmul_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmul_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmul_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmul_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfmul_mu(mask, maskedoff, op1, op2, 
vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_bf16m8_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmul.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m8_t test_vfmul_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+ return __riscv_vfmul_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmv.c
new file mode 100644
index 0000000000000..9fd1ffce446bb
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfmv.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16mf4_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv1bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf4_t test_vfmv_v_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16mf2_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv2bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf2_t test_vfmv_v_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16m1_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv4bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m1_t test_vfmv_v_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16m2_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv8bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m2_t test_vfmv_v_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_v_tu(maskedoff, src, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16m4_tu(
+// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv16bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmv_v_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_v_f_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.v.f.nxv32bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfmv_v_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_v_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv1bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfmv_s_f_bf16mf4_tu(vbfloat16mf4_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv2bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfmv_s_f_bf16mf2_tu(vbfloat16mf2_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv4bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfmv_s_f_bf16m1_tu(vbfloat16m1_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv8bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfmv_s_f_bf16m2_tu(vbfloat16m2_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv16bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfmv_s_f_bf16m4_tu(vbfloat16m4_t maskedoff, __bf16 src, size_t vl) { + return __riscv_vfmv_s_tu(maskedoff, src, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfmv_s_f_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], bfloat noundef [[SRC:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfmv.s.f.nxv32bf16.i64( [[MASKEDOFF]], bfloat [[SRC]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m8_t test_vfmv_s_f_bf16m8_tu(vbfloat16m8_t maskedoff, __bf16 src, size_t vl) {
+ return __riscv_vfmv_s_tu(maskedoff, src, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt.c
new file mode 100644
index 0000000000000..c6cd0a55fa530
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt.c
@@ -0,0 +1,1539 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd, vbfloat16mf4_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd, vbfloat16mf2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd, vbfloat16m1_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2,
+ size_t vl) {
+ return __riscv_vfncvt_x_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2,
+ size_t vl) {
+
return __riscv_vfncvt_x_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, vl); +} 
+ +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd, vfloat32m1_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tu(vbfloat16m1_t vd, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tu(vbfloat16m2_t vd, vfloat32m4_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tu(vbfloat16m4_t vd, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return 
__riscv_vfncvt_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t 
test_vfncvt_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_tumu( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + 
vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t 
test_vfncvt_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tu(vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tu(vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tu(vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tu(vint8m1_t vd, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tu(vint8m2_t vd, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tu(vint8m4_t vd, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tu(vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tu(vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tu(vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tu(vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tu(vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tu(vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tu(vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tu(vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tu(vbfloat16m1_t vd, vfloat32m2_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tu(vbfloat16m2_t vd, vfloat32m4_t vs2, + size_t vl) { + return 
__riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tu(vbfloat16m4_t vd, vfloat32m8_t vs2, + size_t vl) { + return __riscv_vfncvt_f_bf16_tu(vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tum(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tum(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tum(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tum(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tum(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum( +// CHECK-RV64-SAME: 
[[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tum(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tum(vbool64_t vm, + vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tum(vbool32_t vm, + vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tum(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tum(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tum(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tum(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tum(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tum(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tum(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tum(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_tumu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_tumu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_tumu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_tumu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_tumu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_tumu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_tumu(vbool64_t vm, + vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_tumu(vbool32_t vm, + vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_tumu(vbool16_t vm, + vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_tumu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_tumu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_tumu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_tumu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_tumu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_tumu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_tumu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_tumu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_tumu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_x_f_w_bf16mf4_i8mf8_rm_mu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_x_f_w_bf16mf2_i8mf4_rm_mu(vbool32_t vm, vint8mf4_t vd, + 
vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_x_f_w_bf16m1_i8mf2_rm_mu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_x_f_w_bf16m2_i8m1_rm_mu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_x_f_w_bf16m4_i8m2_rm_mu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_x_f_w_bf16m8_i8m4_rm_mu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_x_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_xu_f_w_bf16mf4_u8mf8_rm_mu(vbool64_t vm, vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_xu_f_w_bf16mf2_u8mf4_rm_mu(vbool32_t vm, vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_xu_f_w_bf16m1_u8mf2_rm_mu(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_xu_f_w_bf16m2_u8m1_rm_mu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_xu_f_w_bf16m4_u8m2_rm_mu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_xu_f_w_bf16m8_u8m4_rm_mu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_xu_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_f_f_w_bf16mf4_rm_mu(vbool64_t vm, vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_f_f_w_bf16mf2_rm_mu(vbool32_t vm, vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_f_f_w_bf16m1_rm_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m2_rm_mu(
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_f_f_w_bf16m2_rm_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_bf16m4_rm_mu(
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_f_f_w_bf16m4_rm_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_f_bf16_mu(vm, vd, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rod.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rod.c
new file mode 100644
index 0000000000000..0745633042d44
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rod.c
@@ -0,0 +1,233 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tu(vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf2_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tu(vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+//
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tu(vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tu(vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tu(vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tum(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tum(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return 
__riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_tumu(vbool64_t vm, + vbfloat16mf4_t vd, + vfloat32mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_tumu(vbool32_t vm, + vbfloat16mf2_t vd, + vfloat32m1_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + vfloat32m2_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + vfloat32m4_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + vfloat32m8_t vs2, size_t vl) { + return __riscv_vfncvt_rod_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf4_t test_vfncvt_rod_f_f_w_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd,
+ vfloat32mf2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16mf2_mu(
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf2_t test_vfncvt_rod_f_f_w_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd,
+ vfloat32m1_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m1_mu(
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m1_t test_vfncvt_rod_f_f_w_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd,
+ vfloat32m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m2_mu(
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m2_t test_vfncvt_rod_f_f_w_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd,
+ vfloat32m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rod_f_f_w_bf16m4_mu(
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16m4_t test_vfncvt_rod_f_f_w_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd,
+ vfloat32m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rod_f_bf16_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rtz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rtz.c
new file mode 100644
index 0000000000000..b906c5f411064
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfncvt_rtz.c
@@ -0,0 +1,572 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(
+// CHECK-RV64-SAME: [[VD:%.*]],
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tu(vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tu(vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tu(vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tu(vint8m1_t vd, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tu(vint8m2_t vd, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tu(vint8m4_t vd, vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tu(vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu( +// CHECK-RV64-SAME: 
[[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tu(vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tu(vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tu(vuint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tu(vuint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tu(vuint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_xu_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return 
__riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tum(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tum(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tum(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tum(vbool64_t vm, + vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tum(vbool32_t vm, + vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tum(vbool16_t vm, + vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tum(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tum(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tum(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_tumu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_tumu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_tumu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_tumu(vbool64_t vm, + vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_tumu(vbool32_t vm, + vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t 
test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_tumu(vbool16_t vm, + vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, + vbfloat16m8_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf8_t test_vfncvt_rtz_x_f_w_bf16mf4_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf4_t test_vfncvt_rtz_x_f_w_bf16mf2_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8mf2_t test_vfncvt_rtz_x_f_w_bf16m1_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vfncvt_rtz_x_f_w_bf16m2_i8m1_mu(vbool8_t vm, vint8m1_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vfncvt_rtz_x_f_w_bf16m4_i8m2_mu(vbool4_t vm, vint8m2_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vfncvt_rtz_x_f_w_bf16m8_i8m4_mu(vbool2_t vm, vint8m4_t vd, + vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfncvt_rtz_x_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf8_t test_vfncvt_rtz_xu_f_w_bf16mf4_u8mf8_mu(vbool64_t vm, + vuint8mf8_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf4_t test_vfncvt_rtz_xu_f_w_bf16mf2_u8mf4_mu(vbool32_t vm, + vuint8mf4_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vuint8mf2_t test_vfncvt_rtz_xu_f_w_bf16m1_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m1_t test_vfncvt_rtz_xu_f_w_bf16m2_u8m1_mu(vbool8_t vm, vuint8m1_t vd,
+ vbfloat16m2_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m2_t test_vfncvt_rtz_xu_f_w_bf16m4_u8m2_mu(vbool4_t vm, vuint8m2_t vd,
+ vbfloat16m4_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vuint8m4_t test_vfncvt_rtz_xu_f_w_bf16m8_u8m4_mu(vbool2_t vm, vuint8m4_t vd,
+ vbfloat16m8_t vs2, size_t vl) {
+ return __riscv_vfncvt_rtz_xu_mu(vm, vd, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmacc.c
new file mode 100644
index 0000000000000..cc487b49429dd
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmacc.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) {
+ return __riscv_vfnmacc_tu(vd, rs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:
[[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, vs1, vs2, 
vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat 
noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2_tumu( 
+// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfnmacc_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmacc_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmacc_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmacc_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmacc_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmacc_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmacc_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmacc.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmacc_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmacc_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmadd.c new file mode 100644 index 0000000000000..f9c348b3dbb0b --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmadd.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, 
vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], 
[[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmadd_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmadd_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmadd_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmadd_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmadd_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmadd_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmadd.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmadd_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmadd_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsac.c new file mode 100644 index 0000000000000..83d35e81403ce --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsac.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t 
test_vfnmsac_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfnmsac_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfnmsac_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); 
+} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return 
__riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsac_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t 
vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsac_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsac_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsac_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t 
vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsac_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsac_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsac.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsac_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsac_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsub.c new file mode 100644 index 0000000000000..f5282a195131d --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfnmsub.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tu(vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tu(vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tu(vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tu(vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1_tu(vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1_tu(vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2_tu(vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], 
bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2_tu(vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4_tu(vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4_tu(vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8_tu(vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8_tu(vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_tu(vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], 
[[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64( [[VD]], 
bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_tum(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_tumu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, vbfloat16mf4_t vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfnmsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t vd, __bf16 rs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, vbfloat16mf2_t vs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfnmsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t vd, __bf16 rs1, vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, vbfloat16m1_t vs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfnmsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t vd, __bf16 rs1, vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, vbfloat16m2_t vs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfnmsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t vd, __bf16 rs1, vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, vbfloat16m4_t vs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfnmsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t vd, __bf16 rs1, vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.nxv32bf16.i64( [[VD]], [[VS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, vbfloat16m8_t vs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfnmsub_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[VD:%.*]], bfloat noundef [[RS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfnmsub.mask.nxv32bf16.bf16.i64( [[VD]], bfloat [[RS1]], [[VS2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfnmsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t vd, __bf16 rs1, vbfloat16m8_t vs2, size_t vl) { + return __riscv_vfnmsub_mu(mask, vd, rs1, vs2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrec7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrec7.c new file mode 100644 index 0000000000000..f8e5a339c87d7 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrec7.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define 
dso_local @test_vfrec7_v_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrec7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrec7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrec7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrec7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrec7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrec7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrec7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrec7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrec7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrec7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrec7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrec7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrec7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfrec7_v_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrec7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrec7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrec7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrec7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrec7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrec7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrec7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfrec7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrec7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrec7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrec7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrec7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrec7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrec7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrec7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrec7_v_bf16mf2_mu(vbool32_t mask, 
vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrec7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrec7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrec7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrec7_v_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrec7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrec7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrec7_mu(mask, maskedoff, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsqrt7.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsqrt7.c new file mode 100644 index 0000000000000..7c6c926e50c10 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsqrt7.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return 
__riscv_vfrsqrt7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrsqrt7_tu(maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, 
vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrsqrt7_tum(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsqrt7_v_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsqrt7_v_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsqrt7_v_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsqrt7_v_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrsqrt7_tumu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsqrt7_v_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, size_t vl) { + return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsqrt7_v_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, size_t vl) { + return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv4bf16.i64( [[MASKEDOFF]], 
[[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsqrt7_v_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, size_t vl) { + return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsqrt7_v_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, size_t vl) { + return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsqrt7_v_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, size_t vl) { + return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsqrt7_v_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsqrt7.mask.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsqrt7_v_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, size_t vl) { + return __riscv_vfrsqrt7_mu(mask, maskedoff, op1, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsub.c new file mode 100644 index 0000000000000..c09caeb8207af --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfrsub.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv2bf16.bf16.i64( [[MASKEDOFF]], 
[[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfrsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfrsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfrsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfrsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfrsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfrsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfrsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfrsub_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnj.c new file mode 100644 index 0000000000000..c1f69932f6a02 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnj.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py 
UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 
op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfsgnj_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t 
vl) { + return __riscv_vfsgnj_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnj_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnj_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], 
bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnj_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnj_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnj_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfsgnj_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnj_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnj_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjn.c new file mode 100644 index 0000000000000..1b799d87d8131 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjn.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return 
__riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t 
test_vfsgnjn_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjn_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjn_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjn_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjn_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjn_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjn_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjn_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjn_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+  return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjn_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjn_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsgnjn_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjx.c
new file mode 100644
index 0000000000000..9c5f2af3d6e8f
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsgnjx.c
@@ -0,0 +1,489 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vv_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], <vscale x 1 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], <vscale x 1 x bfloat> [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+  return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfsgnjx_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfsgnjx.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[OP1]], bfloat [[OP2]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfsgnjx_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, 
op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t 
test_vfsgnjx_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsgnjx_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsgnjx_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsgnjx_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsgnjx_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsgnjx_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsgnjx_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, 
vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfsgnjx_vf_bf16m2_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfsgnjx_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vv_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], <vscale x 16 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], <vscale x 16 x bfloat> [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+  return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfsgnjx_vf_bf16m4_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfsgnjx_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vv_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], <vscale x 32 x bfloat> [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], <vscale x 32 x bfloat> [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+  return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfsgnjx_vf_bf16m8_mu(
+// CHECK-RV64-SAME: <vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat> @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16.i64(<vscale x 32 x bfloat> [[MASKEDOFF]], <vscale x 32 x bfloat> [[OP1]], bfloat [[OP2]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret <vscale x 32 x bfloat> [[TMP0]]
+//
+vbfloat16m8_t test_vfsgnjx_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vfsgnjx_mu(mask, maskedoff, op1, op2, vl);
+}
+
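The policy suffixes exercised throughout these files map directly onto the IR in the checks: _tu merges tail elements from maskedoff and carries no mask or policy operand, _tum is masked with tail undisturbed (trailing policy operand i64 2), _tumu keeps both tail and masked-off elements (i64 0), and _mu keeps only masked-off elements (i64 1). As a minimal usage sketch, not part of the patch and assuming a zvfbfa-enabled toolchain: vfsgnjx sets the result sign to sign(op1) XOR sign(op2), so passing the same vector for both operands clears the sign bit, yielding a masked bf16 absolute value.

#include <riscv_vector.h>

// Sketch only (hypothetical helper, not from this patch): |x| for bf16
// elements via sign-injection XOR, since sign(x) ^ sign(x) == 0. With the
// _mu policy, masked-off elements come from maskedoff and the tail is
// agnostic, matching the trailing "i64 1" policy operand in the checks above.
static inline vbfloat16m1_t abs_bf16m1_mu(vbool16_t mask,
                                          vbfloat16m1_t maskedoff,
                                          vbfloat16m1_t x, size_t vl) {
  return __riscv_vfsgnjx_mu(mask, maskedoff, x, x, vl);
}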
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1down.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1down.c
new file mode 100644
index 0000000000000..691302e245427
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1down.c
@@ -0,0 +1,249 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x bfloat> @test_vfslide1down_vf_bf16mf4_tu(
+// CHECK-RV64-SAME: <vscale x 1 x bfloat> [[MASKEDOFF:%.*]], <vscale x 1 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1down.nxv1bf16.bf16.i64(<vscale x 1 x bfloat> [[MASKEDOFF]], <vscale x 1 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 1 x bfloat> [[TMP0]]
+//
+vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x bfloat> @test_vfslide1down_vf_bf16mf2_tu(
+// CHECK-RV64-SAME: <vscale x 2 x bfloat> [[MASKEDOFF:%.*]], <vscale x 2 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1down.nxv2bf16.bf16.i64(<vscale x 2 x bfloat> [[MASKEDOFF]], <vscale x 2 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 2 x bfloat> [[TMP0]]
+//
+vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x bfloat> @test_vfslide1down_vf_bf16m1_tu(
+// CHECK-RV64-SAME: <vscale x 4 x bfloat> [[MASKEDOFF:%.*]], <vscale x 4 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1down.nxv4bf16.bf16.i64(<vscale x 4 x bfloat> [[MASKEDOFF]], <vscale x 4 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 4 x bfloat> [[TMP0]]
+//
+vbfloat16m1_t test_vfslide1down_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x bfloat> @test_vfslide1down_vf_bf16m2_tu(
+// CHECK-RV64-SAME: <vscale x 8 x bfloat> [[MASKEDOFF:%.*]], <vscale x 8 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.nxv8bf16.bf16.i64(<vscale x 8 x bfloat> [[MASKEDOFF]], <vscale x 8 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 8 x bfloat> [[TMP0]]
+//
+vbfloat16m2_t test_vfslide1down_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x bfloat> @test_vfslide1down_vf_bf16m4_tu(
+// CHECK-RV64-SAME: <vscale x 16 x bfloat> [[MASKEDOFF:%.*]], <vscale x 16 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16.i64(<vscale x 16 x bfloat> [[MASKEDOFF]], <vscale x 16 x bfloat> [[SRC]], bfloat [[VALUE]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret <vscale x 16 x bfloat> [[TMP0]]
+//
+vbfloat16m4_t test_vfslide1down_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) {
+  return __riscv_vfslide1down_tu(maskedoff, src, value, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x bfloat> @test_vfslide1down_vf_bf16m8_tu(
+// CHECK-RV64-SAME: <vscale x 32 x bfloat> [[MASKEDOFF:%.*]], <vscale x 32 x bfloat> [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x bfloat>
@llvm.riscv.vfslide1down.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1down_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1down_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1down_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1down_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tum(mask, maskedoff, src, 
value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1down_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1down_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1down_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1down_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1down_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1down_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1down_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1down_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1down_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return 
__riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1down_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1down_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1down_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1down_mu(mask, maskedoff, src, value, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1up.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1up.c new file mode 100644 index 0000000000000..1238d2204c6d4 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfslide1up.c @@ -0,0 +1,249 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]]
= call @llvm.riscv.vfslide1up.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1up_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1up_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1up_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1up_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tu(maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1up_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1up_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1up_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1up_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tum(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, 
__bf16 value, size_t vl) { + return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1up_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1up_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1up_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1up_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_tumu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfslide1up_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfslide1up_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfslide1up_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfslide1up_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfslide1up_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfslide1up_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], bfloat noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[SRC]], bfloat [[VALUE]], [[MASK]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfslide1up_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t src, __bf16 value, size_t vl) { + return __riscv_vfslide1up_mu(mask, maskedoff, src, value, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsub.c new file mode 100644 index 0000000000000..ea4f8f043d6bb --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfsub.c @@ -0,0 +1,489 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature 
+experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4_tu(vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vv_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2_tu(vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1_tu(vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local
@test_vfsub_vv_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2_tu(vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4_tu(vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m8_tu( +// CHECK-RV64-SAME: [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8_tu(vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tu(maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4_tum(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vv_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2_tum(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1_tum(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2_tum(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4_tum(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub_tum(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m8_tum( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8_tum(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tum(mask, 
maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4_tumu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vv_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2_tumu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1_tumu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2_tumu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4_tumu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m8_tumu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8_tumu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_tumu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vv_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfsub_vf_bf16mf4_mu(vbool64_t mask, vbfloat16mf4_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vv_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16mf2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfsub_vf_bf16mf2_mu(vbool32_t mask, vbfloat16mf2_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vv_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m1_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], 
bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfsub_vf_bf16m1_mu(vbool16_t mask, vbfloat16m1_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vv_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfsub_vf_bf16m2_mu(vbool8_t mask, vbfloat16m2_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vv_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_bf16m4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfsub_vf_bf16m4_mu(vbool4_t mask, vbfloat16m4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vv_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfsub_vf_bf16m8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfsub.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfsub_vf_bf16m8_mu(vbool2_t mask, vbfloat16m8_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vfsub_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwadd.c new file mode 100644 index 0000000000000..e5b7b8da1f3cc --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwadd.c @@ -0,0 +1,1932 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return
__riscv_vfwadd_wf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) 
{ + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfwadd_wf_bf16_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return 
__riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + 
vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwadd_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( 
[[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_mu( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, 
vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_tu(vd, 
vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm_tu( 
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: 
[[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( 
[[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwadd_wv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t 
test_vfwadd_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + 
+// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: 
ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_tumu(vm, vd, vs2, 
rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwadd_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, 
i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwadd_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwadd_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwadd_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwadd_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_bf16_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwadd_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwadd_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwcvt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwcvt.c new file mode 100644 index 0000000000000..730010421b944 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwcvt.c @@ -0,0 +1,765 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tu(vbfloat16mf4_t vd, vint8mf8_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tu(vbfloat16mf2_t vd, vint8mf4_t vs2, + size_t vl) { +
return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tu(vbfloat16m1_t vd, vint8mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tu(vbfloat16m2_t vd, vint8m1_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tu(vbfloat16m4_t vd, vint8m2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tu(vbfloat16m8_t vd, vint8m4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tu(vbfloat16mf4_t vd, vuint8mf8_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tu(vbfloat16mf2_t vd, vuint8mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tu(vbfloat16m1_t vd, vuint8mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2_tu( +// 
CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tu(vbfloat16m2_t vd, vuint8m1_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tu(vbfloat16m4_t vd, vuint8m2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tu(vbfloat16m8_t vd, vuint8m4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_bf16_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tu(vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + vint8m4_t vs2, size_t vl) { + return 
__riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tum(vbool64_t vm, vbfloat16mf4_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tum(vbool32_t vm, vbfloat16mf2_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tum(vbool16_t vm, vbfloat16m1_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tum(vbool8_t vm, vbfloat16m2_t vd, + vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tum(vbool4_t vm, vbfloat16m4_t vd, + vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tum(vbool2_t vm, vbfloat16m8_t vd, + vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tum(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + vint8mf4_t vs2, size_t vl) { + 
return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + vint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_tumu(vbool64_t vm, vbfloat16mf4_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_tumu(vbool32_t vm, vbfloat16mf2_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_tumu(vbool16_t vm, vbfloat16m1_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_tumu(vbool8_t vm, vbfloat16m2_t vd, + vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_tumu(vbool4_t vm, vbfloat16m4_t vd, + vuint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_tumu(vbool2_t vm, vbfloat16m8_t vd, + vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + 
vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_tumu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_x_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + vint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_x_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + vint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_x_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + vint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_x_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + vint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_x_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + vint8m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_bf16m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_x_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + vint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf4_t test_vfwcvt_f_xu_v_bf16mf4_mu(vbool64_t vm, vbfloat16mf4_t vd, + vuint8mf8_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16mf2_t test_vfwcvt_f_xu_v_bf16mf2_mu(vbool32_t vm, vbfloat16mf2_t vd, + vuint8mf4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m1_t test_vfwcvt_f_xu_v_bf16m1_mu(vbool16_t vm, vbfloat16m1_t vd, + vuint8mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m2_t test_vfwcvt_f_xu_v_bf16m2_mu(vbool8_t vm, vbfloat16m2_t vd, + vuint8m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m4_t test_vfwcvt_f_xu_v_bf16m4_mu(vbool4_t vm, vbfloat16m4_t vd, + vuint8m2_t vs2, size_t vl) { + return 
__riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_bf16m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbfloat16m8_t test_vfwcvt_f_xu_v_bf16m8_mu(vbool2_t vm, vbfloat16m8_t vd, + vuint8m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_bf16_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwcvt_f_f_v_bf16mf4_f32mf2_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwcvt_f_f_v_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwcvt_f_f_v_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwcvt_f_f_v_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwcvt.f.f.v.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VM]], i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwcvt_f_f_v_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwcvt_f_mu(vm, vd, vs2, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmacc.c new file mode 100644 index 
0000000000000..b05f88028874d --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmacc.c @@ -0,0 +1,977 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmacc_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t 
vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// 
+vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 
[[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tum( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: 
define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_tumu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], 
i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat 
noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmacc_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + 
vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmacc_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmsac.c new file mode 100644 index 0000000000000..93721f6a889d9 --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmsac.c @@ -0,0 +1,977 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +//
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], 
[[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], 
[[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); +} 
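
The policy suffix on each overloaded intrinsic maps directly onto the trailing i64 policy operand checked above: _tu and _tum lower with policy 2 (tail undisturbed, mask agnostic), _tumu with policy 0 (tail and mask undisturbed), and _mu with policy 1 (tail agnostic, mask undisturbed); the rounding-mode operand is 7 (dynamic frm) unless an explicit mode such as __RISCV_FRM_RNE (0) is supplied. A minimal usage sketch in the same style as these tests, assuming <riscv_vector.h> and -target-feature +experimental-zvfbfa (the wrapper name widen_msac_f32 is illustrative, not from the tests):

vfloat32m1_t widen_msac_f32(vbool32_t vm, vfloat32m1_t acc,
                            vbfloat16mf2_t a, vbfloat16mf2_t b, size_t vl) {
  // _tumu keeps tail and masked-off elements of acc unchanged, so this
  // lowers to @llvm.riscv.vfwmsac.mask.* with frm operand 7 and policy 0.
  return __riscv_vfwmsac_tumu(vm, acc, a, b, vl);
}
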
+ +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + 
vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t 
test_vfwmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tum( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], 
[[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: 
[[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_tumu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t 
test_vfwmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmsac_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return 
__riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmsac_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmul.c new file mode 100644 index 0000000000000..4a2b5e39ef2cf --- /dev/null +++ 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwmul.c @@ -0,0 +1,975 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwmul_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], 
i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_mu( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmul_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef 
[[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwmul_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t 
test_vfwmul_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwmul_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return 
__riscv_vfwmul_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwmul_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwmul_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwmul_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat 
[[RS1]], <vscale x 4 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwmul_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+                                               vbfloat16m1_t vs2, __bf16 rs1,
+                                               size_t vl) {
+  return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], <vscale x 8 x bfloat> [[VS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+                                               vbfloat16m2_t vs2,
+                                               vbfloat16m2_t vs1, size_t vl) {
+  return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwmul_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: <vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 8 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vfwmul_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+                                               vbfloat16m2_t vs2, __bf16 rs1,
+                                               size_t vl) {
+  return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], <vscale x 16 x bfloat> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], <vscale x 16 x bfloat> [[VS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+                                               vbfloat16m4_t vs2,
+                                               vbfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwmul_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwmul_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: <vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x bfloat> [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x bfloat> [[VS2]], bfloat [[RS1]], <vscale x 16 x i1> [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vfwmul_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+                                               vbfloat16m4_t vs2, __bf16 rs1,
+                                               size_t vl) {
+  return __riscv_vfwmul_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmacc.c
new file mode 100644
index 0000000000000..57e433441cd40
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmacc.c
@@ -0,0 +1,994 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x bfloat> [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x bfloat> [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+                                                 vbfloat16mf4_t vs1,
+                                                 vbfloat16mf4_t vs2,
+                                                 size_t vl) {
+  return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwnmacc_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: <vscale x 1 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 1 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64(<vscale x 1 x float> [[VD]], bfloat [[VS1]], <vscale x 1 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+                                                 vbfloat16mf4_t vs2,
+                                                 size_t vl) {
+  return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x bfloat> [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x bfloat> [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+                                               vbfloat16mf2_t vs1,
+                                               vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwnmacc_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: <vscale x 2 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 2 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64(<vscale x 2 x float> [[VD]], bfloat [[VS1]], <vscale x 2 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+                                               vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x bfloat> [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x bfloat> [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tu(vfloat32m2_t vd,
+                                              vbfloat16m1_t vs1,
+                                              vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwnmacc_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: <vscale x 4 x float> [[VD:%.*]], bfloat noundef [[VS1:%.*]], <vscale x 4 x bfloat> [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  [[ENTRY:.*:]]
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64(<vscale x 4 x float> [[VD]], bfloat [[VS1]], <vscale x 4 x bfloat> [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1,
+                                              vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwnmacc_vv_bf16m2_f32m4_tu(
+// CHECK-RV64-SAME: <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x bfloat> [[VS1:%.*]], <vscale x 8 x bfloat> [[VS2:%.*]], i64
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwnmacc_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_tumu(vbool32_t vm, 
vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], 
[[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], 
[[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tum( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); 
+} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, 
i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_tumu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, + vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, + vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) 
+// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmacc_vf_bf16mf4_f32mf2_rm_mu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmacc_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmacc_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], 
bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vfwnmacc_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd,
+                                                 __bf16 vs1, vbfloat16m1_t vs2,
+                                                 size_t vl) {
+  return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+                                                 vbfloat16m2_t vs1,
+                                                 vbfloat16m2_t vs2, size_t vl) {
+  return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m4_t test_vfwnmacc_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd,
+                                                 __bf16 vs1, vbfloat16m2_t vs2,
+                                                 size_t vl) {
+  return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+                                                 vbfloat16m4_t vs1,
+                                                 vbfloat16m4_t vs2, size_t vl) {
+  return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vfwnmacc_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+                                                 __bf16 vs1, vbfloat16m4_t vs2,
+                                                 size_t vl) {
+  return __riscv_vfwnmacc_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmsac.c
new file mode 100644
index 0000000000000..42da060126a2b
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwnmsac.c
@@ -0,0 +1,994 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN:   -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd,
+                                                 vbfloat16mf4_t vs1,
+                                                 vbfloat16mf4_t vs2,
+                                                 size_t vl) {
+  return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, __bf16 vs1,
+                                                 vbfloat16mf4_t vs2,
+                                                 size_t vl) {
+  return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd,
+                                               vbfloat16mf2_t vs1,
+                                               vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, __bf16 vs1,
+                                               vbfloat16mf2_t vs2, size_t vl) {
+  return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tu(vfloat32m2_t vd,
+                                              vbfloat16m1_t vs1,
+                                              vbfloat16m1_t vs2, size_t vl) {
+  return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_tu(
+// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m2_t
test_vfwnmsac_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tum(vbool64_t vm, + vfloat32mf2_t vd, 
__bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], 
i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, + vfloat32mf2_t vd, __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: 
[[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + __bf16 vs1, vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return 
__riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t 
test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + __bf16 vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t 
test_vfwnmsac_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, __bf16 vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, __bf16 vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tu(vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tum( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); 
+} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tum(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_tumu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], 
bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, + vfloat32m1_t vd, __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, + vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, + vfloat32m2_t vd, __bf16 vs1, + vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, + vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, + vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_tumu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs1, + vbfloat16mf4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16.nxv1bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwnmsac_vf_bf16mf4_f32mf2_rm_mu( + vbool64_t vm, vfloat32mf2_t vd, __bf16 vs1, vbfloat16mf4_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16.nxv2bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwnmsac_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + __bf16 vs1, + vbfloat16mf2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs1, + vbfloat16m1_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16.nxv4bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwnmsac_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + __bf16 vs1, vbfloat16m1_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs1, + vbfloat16m2_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16.nxv8bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwnmsac_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + __bf16 vs1, vbfloat16m2_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs1, + vbfloat16m4_t vs2, size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwnmsac_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], bfloat noundef [[VS1:%.*]], [[VS2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16.nxv16bf16.i64( [[VD]], bfloat [[VS1]], [[VS2]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwnmsac_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + __bf16 vs1, vbfloat16m4_t vs2, + size_t vl) { + return __riscv_vfwnmsac_mu(vm, vd, vs1, vs2, __RISCV_FRM_RNE, vl); +} diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwsub.c 
b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwsub.c new file mode 100644 index 0000000000000..1378bc963b216 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vfwsub.c @@ -0,0 +1,1932 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include <riscv_vector.h> + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]],
bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tu(vfloat32m2_t vd, vbfloat16m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] 
+// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tu(vfloat32m4_t vd, vbfloat16m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tu(vfloat32m8_t vd, vbfloat16m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_tu( +// CHECK-RV64-SAME: 
[[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tu(vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_tu(vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], 
[[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwsub_wv_bf16m1_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl); +} 
+ +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + 
vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret 
[[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], 
[[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vbfloat16mf4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, vbfloat16m1_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], 
[[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, vbfloat16m2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_mu( +// 
CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, vbfloat16m4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 7, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_mu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm_tu( +// 
CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tu(vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tu(vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tu(vfloat32m1_t vd, vfloat32m1_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tu(vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tu(vfloat32m2_t vd, vfloat32m2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tu(vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv8f32.bf16.i64( [[VD]], [[VS2]], 
bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tu(vfloat32m4_t vd, vfloat32m4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tu(vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tu(vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm_tu( +// CHECK-RV64-SAME: [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tu(vfloat32m8_t vd, vfloat32m8_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_wf_tu(vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 
0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tum(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tum(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat 
noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tum(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tum(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return 
__riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tum(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 
[[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tum(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm_tum( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 2) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tum(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tum(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_tumu(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_tumu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], 
[[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wv_bf16mf2_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_tumu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + 
vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_tumu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_tumu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m4_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_wv_tumu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm_tumu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 0) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_tumu(vbool4_t vm, vfloat32m8_t vd, + vfloat32m8_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_tumu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwsub_vf_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_vf_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vbfloat16mf4_t vs2, + __bf16 rs1, size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf4_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wv_bf16mf4_f32mf2_rm_mu(vbool64_t vm, + vfloat32mf2_t vd, + vfloat32mf2_t vs2, + vbfloat16mf4_t vs1, + size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32mf2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32mf2_t test_vfwsub_wf_bf16_f32mf2_rm_mu(vbool64_t vm, vfloat32mf2_t vd, + vfloat32mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_vf_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vbfloat16mf2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16mf2_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t 
test_vfwsub_wv_bf16mf2_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, + vbfloat16mf2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m1_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m1_t test_vfwsub_wf_bf16_f32m1_rm_mu(vbool32_t vm, vfloat32m1_t vd, + vfloat32m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_vf_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vbfloat16m1_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m1_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wv_bf16m1_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, + vbfloat16m1_t vs1, size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m2_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m2_t test_vfwsub_wf_bf16_f32m2_rm_mu(vbool16_t vm, vfloat32m2_t vd, + vfloat32m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_vf_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vbfloat16m2_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_bf16m2_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wv_bf16m2_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, + vbfloat16m2_t vs1, size_t vl) { + return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m4_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m4_t test_vfwsub_wf_bf16_f32m4_rm_mu(vbool8_t vm, vfloat32m4_t vd, + vfloat32m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, + vbfloat16m4_t vs1, size_t vl) { + return __riscv_vfwsub_vv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_bf16m4_f32m8_rm_mu( +// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vfloat32m8_t test_vfwsub_vf_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd, + vbfloat16m4_t vs2, __bf16 rs1, + size_t vl) { + return __riscv_vfwsub_vf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl); +} + +// CHECK-RV64-LABEL: define dso_local 
@test_vfwsub_wv_bf16m4_f32m8_rm_mu(
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16.i64( [[VD]], [[VS2]], [[VS1]], [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wv_bf16m4_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+                                               vfloat32m8_t vs2,
+                                               vbfloat16m4_t vs1, size_t vl) {
+  return __riscv_vfwsub_wv_mu(vm, vd, vs2, vs1, __RISCV_FRM_RNE, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_bf16_f32m8_rm_mu(
+// CHECK-RV64-SAME: [[VM:%.*]], [[VD:%.*]], [[VS2:%.*]], bfloat noundef [[RS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16.i64( [[VD]], [[VS2]], bfloat [[RS1]], [[VM]], i64 0, i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vfloat32m8_t test_vfwsub_wf_bf16_f32m8_rm_mu(vbool4_t vm, vfloat32m8_t vd,
+                                             vfloat32m8_t vs2, __bf16 rs1,
+                                             size_t vl) {
+  return __riscv_vfwsub_wf_mu(vm, vd, vs2, rs1, __RISCV_FRM_RNE, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfeq.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfeq.c
new file mode 100644
index 0000000000000..3945f826809b0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfeq.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool64_t test_vmfeq_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool64_t test_vmfeq_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool32_t test_vmfeq_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool32_t test_vmfeq_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool16_t test_vmfeq_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool16_t test_vmfeq_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool8_t test_vmfeq_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool8_t test_vmfeq_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m4_b4_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool4_t test_vmfeq_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) {
+  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m4_b4_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool4_t test_vmfeq_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_bf16m8_b2_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool2_t test_vmfeq_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) {
+  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_bf16m8_b2_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfeq.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool2_t test_vmfeq_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vmfeq_mu(mask, maskedoff, op1, op2, vl);
+}
+
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfge.c
new file mode 100644
index 0000000000000..82586da09b325
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfge.c
@@ -0,0 +1,129 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v \
+// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \
+// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN: FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool64_t test_vmfge_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) {
+  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf4_b64_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool64_t test_vmfge_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool32_t test_vmfge_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) {
+  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16mf2_b32_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool32_t test_vmfge_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m1_b16_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool16_t test_vmfge_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) {
+  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m1_b16_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool16_t test_vmfge_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) {
+  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m2_b8_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT: [[ENTRY:.*:]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: ret [[TMP0]]
+//
+vbool8_t test_vmfge_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) {
+  return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m2_b8_mu(
+// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfge_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfge_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfge.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfge_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfge_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfgt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfgt.c new file mode 100644 index 0000000000000..75ccbbc1e8a1e --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfgt.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 
%s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfgt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfgt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfgt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { 
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfgt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfgt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfgt.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfgt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { 
+ return __riscv_vmfgt_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfle.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfle.c new file mode 100644 index 0000000000000..49ff1c9812d62 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfle.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfle_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfle_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], 
[[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfle_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfle_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfle_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], 
[[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfle.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfle_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfle_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmflt.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmflt.c new file mode 100644 index 0000000000000..24b3f9c16d943 --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmflt.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmflt_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], 
bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmflt_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmflt_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmflt_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmflt_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmflt.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmflt_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmflt_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfne.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfne.c new file mode 100644 index 0000000000000..ca3e134910dfb --- /dev/null +++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvfbfa/policy/overloaded/vmfne.c @@ -0,0 +1,129 @@ +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5 +// REQUIRES: riscv-registered-target +// RUN: %clang_cc1 -triple riscv64 -target-feature +v \ +// RUN: -target-feature +experimental-zvfbfa -disable-O0-optnone \ +// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \ +// RUN: FileCheck --check-prefix=CHECK-RV64 %s + +#include + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1bf16.nxv1bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vv_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, vbfloat16mf4_t op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf4_b64_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv1bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool64_t test_vmfne_vf_bf16mf4_b64_mu(vbool64_t mask, vbool64_t maskedoff, vbfloat16mf4_t op1, __bf16 op2, size_t 
vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2bf16.nxv2bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vv_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, vbfloat16mf2_t op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16mf2_b32_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv2bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool32_t test_vmfne_vf_bf16mf2_b32_mu(vbool32_t mask, vbool32_t maskedoff, vbfloat16mf2_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4bf16.nxv4bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vv_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, vbfloat16m1_t op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m1_b16_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv4bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool16_t test_vmfne_vf_bf16m1_b16_mu(vbool16_t mask, vbool16_t maskedoff, vbfloat16m1_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8bf16.nxv8bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vv_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t op1, vbfloat16m2_t op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m2_b8_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv8bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool8_t test_vmfne_vf_bf16m2_b8_mu(vbool8_t mask, vbool8_t maskedoff, vbfloat16m2_t 
op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16bf16.nxv16bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vv_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, vbfloat16m4_t op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m4_b4_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv16bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool4_t test_vmfne_vf_bf16m4_b4_mu(vbool4_t mask, vbool4_t maskedoff, vbfloat16m4_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32bf16.nxv32bf16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vv_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, vbfloat16m8_t op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_bf16m8_b2_mu( +// CHECK-RV64-SAME: [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], bfloat noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: [[ENTRY:.*:]] +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.vmfne.mask.nxv32bf16.bf16.i64( [[MASKEDOFF]], [[OP1]], bfloat [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vbool2_t test_vmfne_vf_bf16m8_b2_mu(vbool2_t mask, vbool2_t maskedoff, vbfloat16m8_t op1, __bf16 op2, size_t vl) { + return __riscv_vmfne_mu(mask, maskedoff, op1, op2, vl); +} + diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h index 70b7c430c410e..649fdb0791664 100644 --- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h +++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h @@ -142,6 +142,12 @@ enum { ReadsPastVLShift = DestEEWShift + 2, ReadsPastVLMask = 1ULL << ReadsPastVLShift, + + // 0 -> Don't care about altfmt bit in VTYPE. + // 1 -> Is not altfmt. + // 2 -> Is altfmt(BF16). + AltFmtTypeShift = ReadsPastVLShift + 1, + AltFmtTypeMask = 3ULL << AltFmtTypeShift, }; // Helper functions to read TSFlags. 
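To make the encoding above concrete: the new field is a two-bit tri-state packed into an instruction's TSFlags word and read back by getAltFmtType() in the next hunk. The standalone C++ sketch below is illustrative only and is not part of the patch; the shift value 27 is inferred from the `let TSFlags{28-27} = AltFmtType` assignment in the RISCVInstrFormats.td hunk further down, and withAltFmtType is a hypothetical helper added here just to demonstrate the round trip (in LLVM, TableGen emits the packed value).

// Illustrative sketch, not LLVM source: packing and reading the 2-bit
// AltFmtType field from a TSFlags word. Shift/mask values mirror the patch.
#include <cassert>
#include <cstdint>

namespace sketch {
// Per the patch: AltFmtTypeShift = ReadsPastVLShift + 1, which lands the
// field in TSFlags bits 28-27 (see the RISCVInstrFormats.td hunk below).
constexpr uint64_t AltFmtTypeShift = 27;
constexpr uint64_t AltFmtTypeMask = 3ULL << AltFmtTypeShift;

// 0 -> Don't care about the altfmt bit in VTYPE.
// 1 -> Is not altfmt.
// 2 -> Is altfmt (BF16).
enum class AltFmtType { DontCare = 0, IsNotAltFmt = 1, IsAltFmt = 2 };

inline AltFmtType getAltFmtType(uint64_t TSFlags) {
  return static_cast<AltFmtType>((TSFlags & AltFmtTypeMask) >> AltFmtTypeShift);
}

// Hypothetical helper for this demo only.
inline uint64_t withAltFmtType(uint64_t TSFlags, AltFmtType AF) {
  return (TSFlags & ~AltFmtTypeMask) |
         (static_cast<uint64_t>(AF) << AltFmtTypeShift);
}
} // namespace sketch

int main() {
  uint64_t Flags = 0; // an all-zero field decodes as DontCare by design
  assert(sketch::getAltFmtType(Flags) == sketch::AltFmtType::DontCare);
  Flags = sketch::withAltFmtType(Flags, sketch::AltFmtType::IsAltFmt);
  assert(sketch::getAltFmtType(Flags) == sketch::AltFmtType::IsAltFmt);
  return 0;
}

Three states rather than a plain boolean let format-agnostic instructions (loads, stores, integer ops) opt out entirely, which is what allows the vsetvli insertion pass to avoid inserting altfmt toggles around them.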
@@ -183,6 +189,11 @@ static inline bool hasRoundModeOp(uint64_t TSFlags) {
   return TSFlags & HasRoundModeOpMask;
 }
 
+enum class AltFmtType { DontCare, IsNotAltFmt, IsAltFmt };
+static inline AltFmtType getAltFmtType(uint64_t TSFlags) {
+  return static_cast<AltFmtType>((TSFlags & AltFmtTypeMask) >> AltFmtTypeShift);
+}
+
 /// \returns true if this instruction uses vxrm
 static inline bool usesVXRM(uint64_t TSFlags) { return TSFlags & UsesVXRMMask; }
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 90e1c47a71c89..5a90f469165fb 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -164,6 +164,7 @@ struct DemandedFields {
   // If this is true, we demand that VTYPE is set to some legal state, i.e. that
   // vill is unset.
   bool VILL = false;
+  bool AltFmt = false;
 
   // Return true if any part of VTYPE was used
   bool usedVTYPE() const {
@@ -183,6 +184,7 @@ struct DemandedFields {
     TailPolicy = true;
     MaskPolicy = true;
     VILL = true;
+    AltFmt = true;
   }
 
   // Mark all VL properties as demanded
@@ -208,6 +210,7 @@ struct DemandedFields {
     TailPolicy |= B.TailPolicy;
     MaskPolicy |= B.MaskPolicy;
     VILL |= B.VILL;
+    AltFmt |= B.AltFmt;
   }
 
 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
@@ -324,6 +327,9 @@ static bool areCompatibleVTYPEs(uint64_t CurVType, uint64_t NewVType,
   if (Used.MaskPolicy && RISCVVType::isMaskAgnostic(CurVType) !=
                              RISCVVType::isMaskAgnostic(NewVType))
     return false;
+  if (Used.AltFmt &&
+      RISCVVType::isAltFmt(CurVType) != RISCVVType::isAltFmt(NewVType))
+    return false;
   return true;
 }
 
@@ -475,6 +481,9 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
     Res.TailPolicy = false;
   }
 
+  Res.AltFmt = RISCVII::getAltFmtType(MI.getDesc().TSFlags) !=
+               RISCVII::AltFmtType::DontCare;
+
   return Res;
 }
 
@@ -506,6 +515,7 @@ class VSETVLIInfo {
   uint8_t TailAgnostic : 1;
   uint8_t MaskAgnostic : 1;
   uint8_t SEWLMULRatioOnly : 1;
+  uint8_t AltFmt : 1;
 
 public:
   VSETVLIInfo()
@@ -582,6 +592,7 @@ class VSETVLIInfo {
   RISCVVType::VLMUL getVLMUL() const { return VLMul; }
   bool getTailAgnostic() const { return TailAgnostic; }
   bool getMaskAgnostic() const { return MaskAgnostic; }
+  bool getAltFmt() const { return AltFmt; }
 
   bool hasNonZeroAVL(const LiveIntervals *LIS) const {
     if (hasAVLImm())
@@ -643,14 +654,16 @@ class VSETVLIInfo {
     SEW = RISCVVType::getSEW(VType);
     TailAgnostic = RISCVVType::isTailAgnostic(VType);
     MaskAgnostic = RISCVVType::isMaskAgnostic(VType);
+    AltFmt = RISCVVType::isAltFmt(VType);
   }
 
-  void setVTYPE(RISCVVType::VLMUL L, unsigned S, bool TA, bool MA) {
+  void setVTYPE(RISCVVType::VLMUL L, unsigned S, bool TA, bool MA, bool AF) {
     assert(isValid() && !isUnknown() &&
            "Can't set VTYPE for uninitialized or unknown");
     VLMul = L;
     SEW = S;
     TailAgnostic = TA;
     MaskAgnostic = MA;
+    AltFmt = AF;
   }
 
   void setVLMul(RISCVVType::VLMUL VLMul) { this->VLMul = VLMul; }
@@ -658,7 +671,8 @@ class VSETVLIInfo {
   unsigned encodeVTYPE() const {
     assert(isValid() && !isUnknown() && !SEWLMULRatioOnly &&
            "Can't encode VTYPE for uninitialized or unknown");
-    return RISCVVType::encodeVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic);
+    return RISCVVType::encodeVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic,
+                                   AltFmt);
   }
 
   bool hasSEWLMULRatioOnly() const { return SEWLMULRatioOnly; }
@@ -670,9 +684,9 @@ class VSETVLIInfo {
            "Can't compare VTYPE in unknown state");
     assert(!SEWLMULRatioOnly && !Other.SEWLMULRatioOnly &&
            "Can't compare when only LMUL/SEW ratio is valid.");
-    return std::tie(VLMul, SEW, TailAgnostic, MaskAgnostic) ==
+    return std::tie(VLMul, SEW, TailAgnostic, MaskAgnostic, AltFmt) ==
            std::tie(Other.VLMul, Other.SEW, Other.TailAgnostic,
-                    Other.MaskAgnostic);
+                    Other.MaskAgnostic, Other.AltFmt);
   }
 
   unsigned getSEWLMULRatio() const {
@@ -1001,6 +1015,8 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
 
   RISCVVType::VLMUL VLMul = RISCVII::getLMul(TSFlags);
 
+  bool AltFmt =
+      RISCVII::getAltFmtType(TSFlags) == RISCVII::AltFmtType::IsAltFmt;
   unsigned Log2SEW = MI.getOperand(getSEWOpNum(MI)).getImm();
   // A Log2SEW of 0 is an operation on mask registers only.
   unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
@@ -1041,7 +1057,7 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
     assert(SEW == EEW && "Initial SEW doesn't match expected EEW");
   }
 #endif
-  InstrInfo.setVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic);
+  InstrInfo.setVTYPE(VLMul, SEW, TailAgnostic, MaskAgnostic, AltFmt);
 
   forwardVSETVLIAVL(InstrInfo);
 
@@ -1194,7 +1210,8 @@ void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info,
     // be coalesced into another vsetvli since we won't demand any fields.
     VSETVLIInfo NewInfo; // Need a new VSETVLIInfo to clear SEWLMULRatioOnly
     NewInfo.setAVLImm(1);
-    NewInfo.setVTYPE(RISCVVType::LMUL_1, /*sew*/ 8, /*ta*/ true, /*ma*/ true);
+    NewInfo.setVTYPE(RISCVVType::LMUL_1, /*sew*/ 8, /*ta*/ true, /*ma*/ true,
+                     /*AF*/ false);
     Info = NewInfo;
     return;
   }
@@ -1236,7 +1253,8 @@ void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info,
       (Demanded.TailPolicy ? IncomingInfo : Info).getTailAgnostic() ||
           IncomingInfo.getTailAgnostic(),
       (Demanded.MaskPolicy ? IncomingInfo : Info).getMaskAgnostic() ||
-          IncomingInfo.getMaskAgnostic());
+          IncomingInfo.getMaskAgnostic(),
+      Demanded.AltFmt ? IncomingInfo.getAltFmt() : 0);
 
   // If we only knew the sew/lmul ratio previously, replace the VTYPE but keep
   // the AVL.
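The RISCVInsertVSETVLI.cpp changes above follow the pass's usual demanded-fields pattern: AltFmt participates in the VTYPE compatibility check only when an instruction actually demands it, i.e. when its AltFmtType is not DontCare. A minimal standalone C++ sketch of that gating (simplified field set; not the LLVM source):

// Illustrative sketch, not LLVM source: how a demanded-fields check gates
// the altfmt comparison, in the style of areCompatibleVTYPEs above.
#include <cassert>

struct VTypeState {
  unsigned SEW;
  bool TailAgnostic;
  bool MaskAgnostic;
  bool AltFmt; // new with zvfbfa: the VTYPE "alternate format" (BF16) bit
};

struct Demanded {
  bool SEW = false;
  bool TailPolicy = false;
  bool MaskPolicy = false;
  bool AltFmt = false; // set when AltFmtType != DontCare, per getDemanded()
};

// An existing vsetvli state can be reused only if every demanded field
// matches; fields nobody demands are free to differ.
static bool areCompatible(const VTypeState &Cur, const VTypeState &New,
                          const Demanded &Used) {
  if (Used.SEW && Cur.SEW != New.SEW)
    return false;
  if (Used.TailPolicy && Cur.TailAgnostic != New.TailAgnostic)
    return false;
  if (Used.MaskPolicy && Cur.MaskAgnostic != New.MaskAgnostic)
    return false;
  if (Used.AltFmt && Cur.AltFmt != New.AltFmt)
    return false;
  return true;
}

int main() {
  VTypeState F16 = {/*SEW=*/16, true, true, /*AltFmt=*/false}; // e.g. zvfh op
  VTypeState BF16 = {/*SEW=*/16, true, true, /*AltFmt=*/true}; // zvfbfa op
  Demanded Used;
  Used.SEW = Used.TailPolicy = Used.MaskPolicy = true;
  // altfmt not demanded (AltFmtType == DontCare): states are interchangeable.
  assert(areCompatible(F16, BF16, Used));
  // altfmt demanded: a new vsetvli must be inserted to flip the bit.
  Used.AltFmt = true;
  assert(!areCompatible(F16, BF16, Used));
  return 0;
}

Since ordinary FP pseudos are tagged IS_NOT_ALTFMT rather than DontCare, mixing zvfh and zvfbfa arithmetic at the same SEW/LMUL still forces a vsetvli between the two, while integer and memory operations remain free to execute under either state.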
diff --git a/llvm/lib/Target/RISCV/RISCVInstrFormats.td b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
index 2afd77a96373b..7b262f2950ba5 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrFormats.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrFormats.td
@@ -267,6 +267,12 @@ class RVInstCommon<dag outs, dag ins, string opcodestr, string argstr,
+  // 0 -> Don't care about altfmt bit in VTYPE.
+  // 1 -> Is not altfmt.
+  // 2 -> Is altfmt(BF16).
+  bits<2> AltFmtType = 0;
+  let TSFlags{28-27} = AltFmtType;
 }
 
 class RVInst { ["_M4", ""],
     ["_M8", ""],
     ["_SE", ""],
-    ["_RM", ""]
+    ["_RM", ""],
+    ["_ALT", ""]
   ];
   string VInst = !foldl(PseudoInst, AffixSubsts, Acc, AffixSubst,
                         !subst(AffixSubst[0], AffixSubst[1], Acc));
@@ -6403,7 +6407,7 @@ let Defs = [VXSAT] in {
 // 13. Vector Floating-Point Instructions
 //===----------------------------------------------------------------------===//
 
-let Predicates = [HasVInstructionsAnyF] in {
+let Predicates = [HasVInstructionsAnyF], AltFmtType = IS_NOT_ALTFMT in {
 //===----------------------------------------------------------------------===//
 // 13.2. Vector Single-Width Floating-Point Add/Subtract Instructions
 //===----------------------------------------------------------------------===//
@@ -6600,7 +6604,7 @@ defm PseudoVWREDSUM : VPseudoVWRED_VS;
 }
 } // Predicates = [HasVInstructions]
 
-let Predicates = [HasVInstructionsAnyF] in {
+let Predicates = [HasVInstructionsAnyF], AltFmtType = IS_NOT_ALTFMT in {
 //===----------------------------------------------------------------------===//
 // 14.3. Vector Single-Width Floating-Point Reduction Instructions
 //===----------------------------------------------------------------------===//
@@ -6710,7 +6714,7 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
 // 16.2.
Floating-Point Scalar Move Instructions //===----------------------------------------------------------------------===// -let Predicates = [HasVInstructionsAnyF] in { +let Predicates = [HasVInstructionsAnyF], AltFmtType = IS_NOT_ALTFMT in { let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in { foreach f = FPList in { let HasSEWOp = 1, BaseInstr = VFMV_F_S in @@ -6738,7 +6742,7 @@ let Predicates = [HasVInstructions] in { defm PseudoVSLIDE1DOWN : VPseudoVSLD1_VX; } // Predicates = [HasVInstructions] -let Predicates = [HasVInstructionsAnyF] in { +let Predicates = [HasVInstructionsAnyF], AltFmtType = IS_NOT_ALTFMT in { defm PseudoVFSLIDE1UP : VPseudoVSLD1_VF<"@earlyclobber $rd">; defm PseudoVFSLIDE1DOWN : VPseudoVSLD1_VF; } // Predicates = [HasVInstructionsAnyF] diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td index 0be9eab6870ec..a346b763b7941 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td @@ -44,6 +44,336 @@ let Predicates = [HasStdExtZvfbfminOrZvfofp8min] in { let mayRaiseFPException = true, Predicates = [HasStdExtZvfbfwma] in defm PseudoVFWMACCBF16 : VPseudoVWMAC_VV_VF_BF_RM; +defset list AllWidenableIntToBF16Vectors = { + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; +} + +multiclass VPseudoVALU_VV_VF_RM_BF16 { + foreach m = MxListF in { + defm "" : VPseudoBinaryFV_VV_RM, + SchedBinary<"WriteVFALUV", "ReadVFALUV", "ReadVFALUV", m.MX, + 16/*sew*/, forcePassthruRead=true>; + } + + defvar f = SCALAR_F16; + foreach m = f.MxList in { + defm "" : VPseudoBinaryV_VF_RM, + SchedBinary<"WriteVFALUF", "ReadVFALUV", "ReadVFALUF", m.MX, + f.SEW, forcePassthruRead=true>; + } +} + +multiclass VPseudoVALU_VF_RM_BF16 { + defvar f = SCALAR_F16; + foreach m = f.MxList in { + defm "" : VPseudoBinaryV_VF_RM, + SchedBinary<"WriteVFALUF", "ReadVFALUV", "ReadVFALUF", m.MX, + f.SEW, forcePassthruRead=true>; + } +} + +multiclass VPseudoVFWALU_VV_VF_RM_BF16 { + foreach m = MxListFW in { + defm "" : VPseudoBinaryW_VV_RM, + SchedBinary<"WriteVFWALUV", "ReadVFWALUV", "ReadVFWALUV", m.MX, + 16/*sew*/, forcePassthruRead=true>; + } + + defvar f = SCALAR_F16; + foreach m = f.MxListFW in { + defm "" : VPseudoBinaryW_VF_RM, + SchedBinary<"WriteVFWALUF", "ReadVFWALUV", "ReadVFWALUF", m.MX, + f.SEW, forcePassthruRead=true>; + } +} + +multiclass VPseudoVFWALU_WV_WF_RM_BF16 { + foreach m = MxListFW in { + defm "" : VPseudoBinaryW_WV_RM, + SchedBinary<"WriteVFWALUV", "ReadVFWALUV", "ReadVFWALUV", m.MX, + 16/*sew*/, forcePassthruRead=true>; + } + defvar f = SCALAR_F16; + foreach m = f.MxListFW in { + defm "" : VPseudoBinaryW_WF_RM, + SchedBinary<"WriteVFWALUF", "ReadVFWALUV", "ReadVFWALUF", m.MX, + f.SEW, forcePassthruRead=true>; + } +} + +multiclass VPseudoVFMUL_VV_VF_RM_BF16 { + foreach m = MxListF in { + defm "" : VPseudoBinaryFV_VV_RM, + SchedBinary<"WriteVFMulV", "ReadVFMulV", "ReadVFMulV", m.MX, + 16/*sew*/, forcePassthruRead=true>; + } + + defvar f = SCALAR_F16; + foreach m = f.MxList in { + defm "" : VPseudoBinaryV_VF_RM, + SchedBinary<"WriteVFMulF", "ReadVFMulV", "ReadVFMulF", m.MX, + f.SEW, forcePassthruRead=true>; + } +} + +multiclass VPseudoVWMUL_VV_VF_RM_BF16 { + foreach m = MxListFW in { + defm "" : VPseudoBinaryW_VV_RM, + SchedBinary<"WriteVFWMulV", "ReadVFWMulV", "ReadVFWMulV", m.MX, + 16/*sew*/, forcePassthruRead=true>; + } + + defvar f = SCALAR_F16; + foreach m = f.MxListFW in { + defm "" : 
VPseudoBinaryW_VF_RM, + SchedBinary<"WriteVFWMulF", "ReadVFWMulV", "ReadVFWMulF", m.MX, + f.SEW, forcePassthruRead=true>; + } +} + +multiclass VPseudoVMAC_VV_VF_AAXA_RM_BF16 { + foreach m = MxListF in { + defm "" : VPseudoTernaryV_VV_AAXA_RM, + SchedTernary<"WriteVFMulAddV", "ReadVFMulAddV", "ReadVFMulAddV", + "ReadVFMulAddV", m.MX, 16/*sew*/>; + } + + defvar f = SCALAR_F16; + foreach m = f.MxList in { + defm "" : VPseudoTernaryV_VF_AAXA_RM, + SchedTernary<"WriteVFMulAddF", "ReadVFMulAddV", "ReadVFMulAddF", + "ReadVFMulAddV", m.MX, f.SEW>; + } +} + +multiclass VPseudoVWMAC_VV_VF_RM_BF16 { + foreach m = MxListFW in { + defm "" : VPseudoTernaryW_VV_RM, + SchedTernary<"WriteVFWMulAddV", "ReadVFWMulAddV", + "ReadVFWMulAddV", "ReadVFWMulAddV", m.MX, 16/*sew*/>; + } + + defvar f = SCALAR_F16; + foreach m = f.MxListFW in { + defm "" : VPseudoTernaryW_VF_RM, + SchedTernary<"WriteVFWMulAddF", "ReadVFWMulAddV", + "ReadVFWMulAddF", "ReadVFWMulAddV", m.MX, f.SEW>; + } +} + +multiclass VPseudoVRCP_V_BF16 { + foreach m = MxListF in { + defvar mx = m.MX; + let VLMul = m.value in { + def "_V_" # mx # "_E16" + : VPseudoUnaryNoMask, + SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, 16/*sew*/, + forcePassthruRead=true>; + def "_V_" # mx # "_E16_MASK" + : VPseudoUnaryMask, + RISCVMaskedPseudo, + SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, 16/*sew*/, + forcePassthruRead=true>; + } + } +} + +multiclass VPseudoVRCP_V_RM_BF16 { + foreach m = MxListF in { + defvar mx = m.MX; + let VLMul = m.value in { + def "_V_" # mx # "_E16" + : VPseudoUnaryNoMaskRoundingMode, + SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, 16/*sew*/, + forcePassthruRead=true>; + def "_V_" # mx # "_E16_MASK" + : VPseudoUnaryMaskRoundingMode, + RISCVMaskedPseudo, + SchedUnary<"WriteVFRecpV", "ReadVFRecpV", mx, 16/*sew*/, + forcePassthruRead=true>; + } + } +} + +multiclass VPseudoVMAX_VV_VF_BF16 { + foreach m = MxListF in { + defm "" : VPseudoBinaryV_VV, + SchedBinary<"WriteVFMinMaxV", "ReadVFMinMaxV", "ReadVFMinMaxV", + m.MX, 16/*sew*/, forcePassthruRead=true>; + } + + defvar f = SCALAR_F16; + foreach m = f.MxList in { + defm "" : VPseudoBinaryV_VF, + SchedBinary<"WriteVFMinMaxF", "ReadVFMinMaxV", "ReadVFMinMaxF", + m.MX, f.SEW, forcePassthruRead=true>; + } +} + +multiclass VPseudoVSGNJ_VV_VF_BF16 { + foreach m = MxListF in { + defm "" : VPseudoBinaryV_VV, + SchedBinary<"WriteVFSgnjV", "ReadVFSgnjV", "ReadVFSgnjV", m.MX, + 16/*sew*/, forcePassthruRead=true>; + } + + defvar f = SCALAR_F16; + foreach m = f.MxList in { + defm "" : VPseudoBinaryV_VF, + SchedBinary<"WriteVFSgnjF", "ReadVFSgnjV", "ReadVFSgnjF", m.MX, + f.SEW, forcePassthruRead=true>; + } +} + +multiclass VPseudoVWCVTF_V_BF16 { + defvar constraint = "@earlyclobber $rd"; + foreach m = MxListW in + defm _V : VPseudoConversion, + SchedUnary<"WriteVFWCvtIToFV", "ReadVFWCvtIToFV", m.MX, 8/*sew*/, + forcePassthruRead=true>; +} + +multiclass VPseudoVWCVTD_V_BF16 { + defvar constraint = "@earlyclobber $rd"; + foreach m = MxListFW in + defm _V : VPseudoConversion, + SchedUnary<"WriteVFWCvtFToFV", "ReadVFWCvtFToFV", m.MX, 16/*sew*/, + forcePassthruRead=true>; +} + +multiclass VPseudoVNCVTD_W_BF16 { + defvar constraint = "@earlyclobber $rd"; + foreach m = MxListFW in + defm _W : VPseudoConversion, + SchedUnary<"WriteVFNCvtFToFV", "ReadVFNCvtFToFV", m.MX, 16/*sew*/, + forcePassthruRead=true>; +} + +multiclass VPseudoVNCVTD_W_RM_BF16 { + defvar constraint = "@earlyclobber $rd"; + foreach m = MxListFW in + defm _W : VPseudoConversionRoundingMode, + SchedUnary<"WriteVFNCvtFToFV", "ReadVFNCvtFToFV", 
m.MX, 16/*sew*/, + forcePassthruRead=true>; +} + +let Predicates = [HasStdExtZvfbfa], AltFmtType = IS_ALTFMT in { +let mayRaiseFPException = true in { +defm PseudoVFADD_ALT : VPseudoVALU_VV_VF_RM_BF16; +defm PseudoVFSUB_ALT : VPseudoVALU_VV_VF_RM_BF16; +defm PseudoVFRSUB_ALT : VPseudoVALU_VF_RM_BF16; +} + +let mayRaiseFPException = true in { +defm PseudoVFWADD_ALT : VPseudoVFWALU_VV_VF_RM_BF16; +defm PseudoVFWSUB_ALT : VPseudoVFWALU_VV_VF_RM_BF16; +defm PseudoVFWADD_ALT : VPseudoVFWALU_WV_WF_RM_BF16; +defm PseudoVFWSUB_ALT : VPseudoVFWALU_WV_WF_RM_BF16; +} + +let mayRaiseFPException = true in +defm PseudoVFMUL_ALT : VPseudoVFMUL_VV_VF_RM_BF16; + +let mayRaiseFPException = true in +defm PseudoVFWMUL_ALT : VPseudoVWMUL_VV_VF_RM_BF16; + +let mayRaiseFPException = true in { +defm PseudoVFMACC_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16; +defm PseudoVFNMACC_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16; +defm PseudoVFMSAC_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16; +defm PseudoVFNMSAC_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16; +defm PseudoVFMADD_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16; +defm PseudoVFNMADD_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16; +defm PseudoVFMSUB_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16; +defm PseudoVFNMSUB_ALT : VPseudoVMAC_VV_VF_AAXA_RM_BF16; +} + +let mayRaiseFPException = true in { +defm PseudoVFWMACC_ALT : VPseudoVWMAC_VV_VF_RM_BF16; +defm PseudoVFWNMACC_ALT : VPseudoVWMAC_VV_VF_RM_BF16; +defm PseudoVFWMSAC_ALT : VPseudoVWMAC_VV_VF_RM_BF16; +defm PseudoVFWNMSAC_ALT : VPseudoVWMAC_VV_VF_RM_BF16; +} + +let mayRaiseFPException = true in +defm PseudoVFRSQRT7_ALT : VPseudoVRCP_V_BF16; + +let mayRaiseFPException = true in +defm PseudoVFREC7_ALT : VPseudoVRCP_V_RM_BF16; + +let mayRaiseFPException = true in { +defm PseudoVFMIN_ALT : VPseudoVMAX_VV_VF_BF16; +defm PseudoVFMAX_ALT : VPseudoVMAX_VV_VF_BF16; +} + +defm PseudoVFSGNJ_ALT : VPseudoVSGNJ_VV_VF_BF16; +defm PseudoVFSGNJN_ALT : VPseudoVSGNJ_VV_VF_BF16; +defm PseudoVFSGNJX_ALT : VPseudoVSGNJ_VV_VF_BF16; + +let mayRaiseFPException = true in { +defm PseudoVMFEQ_ALT : VPseudoVCMPM_VV_VF; +defm PseudoVMFNE_ALT : VPseudoVCMPM_VV_VF; +defm PseudoVMFLT_ALT : VPseudoVCMPM_VV_VF; +defm PseudoVMFLE_ALT : VPseudoVCMPM_VV_VF; +defm PseudoVMFGT_ALT : VPseudoVCMPM_VF; +defm PseudoVMFGE_ALT : VPseudoVCMPM_VF; +} + +defm PseudoVFCLASS_ALT : VPseudoVCLS_V; + +defm PseudoVFMERGE_ALT : VPseudoVMRG_FM; + +defm PseudoVFMV_V_ALT : VPseudoVMV_F; + +let mayRaiseFPException = true in { +defm PseudoVFWCVT_F_XU_ALT : VPseudoVWCVTF_V_BF16; +defm PseudoVFWCVT_F_X_ALT : VPseudoVWCVTF_V_BF16; + +defm PseudoVFWCVT_F_F_ALT : VPseudoVWCVTD_V_BF16; +} // mayRaiseFPException = true + +let mayRaiseFPException = true in { +let hasSideEffects = 0, hasPostISelHook = 1 in { +defm PseudoVFNCVT_XU_F_ALT : VPseudoVNCVTI_W_RM; +defm PseudoVFNCVT_X_F_ALT : VPseudoVNCVTI_W_RM; +} + +defm PseudoVFNCVT_RTZ_XU_F_ALT : VPseudoVNCVTI_W; +defm PseudoVFNCVT_RTZ_X_F_ALT : VPseudoVNCVTI_W; + +defm PseudoVFNCVT_F_F_ALT : VPseudoVNCVTD_W_RM_BF16; + +defm PseudoVFNCVT_ROD_F_F_ALT : VPseudoVNCVTD_W_BF16; +} // mayRaiseFPException = true + +let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in { + defvar f = SCALAR_F16; + let HasSEWOp = 1, BaseInstr = VFMV_F_S in + def "PseudoVFMV_" # f.FX # "_S_ALT" : + RISCVVPseudo<(outs f.fprclass:$rd), (ins VR:$rs2, sew:$sew)>, + Sched<[WriteVMovFS, ReadVMovFS]>; + let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F, isReMaterializable = 1, + Constraints = "$rd = $passthru" in + def "PseudoVFMV_S_" # f.FX # "_ALT" : + RISCVVPseudo<(outs VR:$rd), + (ins VR:$passthru, f.fprclass:$rs1, 
AVL:$vl, sew:$sew)>, + Sched<[WriteVMovSF, ReadVMovSF_V, ReadVMovSF_F]>; +} + +defm PseudoVFSLIDE1UP_ALT : VPseudoVSLD1_VF<"@earlyclobber $rd">; +defm PseudoVFSLIDE1DOWN_ALT : VPseudoVSLD1_VF; +} // Predicates = [HasStdExtZvfbfa], AltFmtType = IS_ALTFMT + //===----------------------------------------------------------------------===// // Patterns //===----------------------------------------------------------------------===// @@ -87,6 +417,130 @@ let Predicates = [HasStdExtZvfbfminOrZvfofp8min] in { FRM_DYN, fvti.AVL, fvti.Log2SEW, TA_MA)>; } + + defm : VPatUnaryV_V_AnyMask<"int_riscv_vcompress", "PseudoVCOMPRESS", AllBF16Vectors>; + defm : VPatBinaryV_VV_VX_VI_INT<"int_riscv_vrgather", "PseudoVRGATHER", + AllBF16Vectors, uimm5>; + defm : VPatBinaryV_VV_INT_EEW<"int_riscv_vrgatherei16_vv", "PseudoVRGATHEREI16", + eew=16, vtilist=AllBF16Vectors>; + defm : VPatTernaryV_VX_VI<"int_riscv_vslideup", "PseudoVSLIDEUP", AllBF16Vectors, uimm5>; + defm : VPatTernaryV_VX_VI<"int_riscv_vslidedown", "PseudoVSLIDEDOWN", AllBF16Vectors, uimm5>; + + foreach fvti = AllBF16Vectors in { + defm : VPatBinaryCarryInTAIL<"int_riscv_vmerge", "PseudoVMERGE", "VVM", + fvti.Vector, + fvti.Vector, fvti.Vector, fvti.Mask, + fvti.Log2SEW, fvti.LMul, fvti.RegClass, + fvti.RegClass, fvti.RegClass>; + defm : VPatBinaryCarryInTAIL<"int_riscv_vfmerge", "PseudoVFMERGE", + "V"#fvti.ScalarSuffix#"M", + fvti.Vector, + fvti.Vector, fvti.Scalar, fvti.Mask, + fvti.Log2SEW, fvti.LMul, fvti.RegClass, + fvti.RegClass, fvti.ScalarRegClass>; + defvar instr = !cast("PseudoVMERGE_VIM_"#fvti.LMul.MX); + def : Pat<(fvti.Vector (int_riscv_vfmerge (fvti.Vector fvti.RegClass:$passthru), + (fvti.Vector fvti.RegClass:$rs2), + (fvti.Scalar (fpimm0)), + (fvti.Mask VMV0:$vm), VLOpFrag)), + (instr fvti.RegClass:$passthru, fvti.RegClass:$rs2, 0, + (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>; + + defvar ivti = GetIntVTypeInfo.Vti; + def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), fvti.RegClass:$rs1, + fvti.RegClass:$rs2)), + (!cast("PseudoVMERGE_VVM_"#fvti.LMul.MX) + (fvti.Vector (IMPLICIT_DEF)), + fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask VMV0:$vm), + fvti.AVL, fvti.Log2SEW)>; + + def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), + (SplatFPOp (SelectScalarFPAsInt (XLenVT GPR:$imm))), + fvti.RegClass:$rs2)), + (!cast("PseudoVMERGE_VXM_"#fvti.LMul.MX) + (fvti.Vector (IMPLICIT_DEF)), + fvti.RegClass:$rs2, GPR:$imm, (fvti.Mask VMV0:$vm), fvti.AVL, fvti.Log2SEW)>; + + def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), + (SplatFPOp (fvti.Scalar fpimm0)), + fvti.RegClass:$rs2)), + (!cast("PseudoVMERGE_VIM_"#fvti.LMul.MX) + (fvti.Vector (IMPLICIT_DEF)), + fvti.RegClass:$rs2, 0, (fvti.Mask VMV0:$vm), fvti.AVL, fvti.Log2SEW)>; + + def : Pat<(fvti.Vector (vselect (fvti.Mask VMV0:$vm), + (SplatFPOp fvti.ScalarRegClass:$rs1), + fvti.RegClass:$rs2)), + (!cast("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX) + (fvti.Vector (IMPLICIT_DEF)), + fvti.RegClass:$rs2, + (fvti.Scalar fvti.ScalarRegClass:$rs1), + (fvti.Mask VMV0:$vm), fvti.AVL, fvti.Log2SEW)>; + + def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm), + fvti.RegClass:$rs1, + fvti.RegClass:$rs2, + fvti.RegClass:$passthru, + VLOpFrag)), + (!cast("PseudoVMERGE_VVM_"#fvti.LMul.MX) + fvti.RegClass:$passthru, fvti.RegClass:$rs2, fvti.RegClass:$rs1, (fvti.Mask VMV0:$vm), + GPR:$vl, fvti.Log2SEW)>; + + def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm), + (SplatFPOp (SelectScalarFPAsInt (XLenVT GPR:$imm))), + fvti.RegClass:$rs2, + fvti.RegClass:$passthru, + 
VLOpFrag)), + (!cast("PseudoVMERGE_VXM_"#fvti.LMul.MX) + fvti.RegClass:$passthru, fvti.RegClass:$rs2, GPR:$imm, (fvti.Mask VMV0:$vm), + GPR:$vl, fvti.Log2SEW)>; + + + def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm), + (SplatFPOp (fvti.Scalar fpimm0)), + fvti.RegClass:$rs2, + fvti.RegClass:$passthru, + VLOpFrag)), + (!cast("PseudoVMERGE_VIM_"#fvti.LMul.MX) + fvti.RegClass:$passthru, fvti.RegClass:$rs2, 0, (fvti.Mask VMV0:$vm), + GPR:$vl, fvti.Log2SEW)>; + + def : Pat<(fvti.Vector (riscv_vmerge_vl (fvti.Mask VMV0:$vm), + (SplatFPOp fvti.ScalarRegClass:$rs1), + fvti.RegClass:$rs2, + fvti.RegClass:$passthru, + VLOpFrag)), + (!cast("PseudoVFMERGE_V"#fvti.ScalarSuffix#"M_"#fvti.LMul.MX) + fvti.RegClass:$passthru, fvti.RegClass:$rs2, + (fvti.Scalar fvti.ScalarRegClass:$rs1), + (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW)>; + + def : Pat<(fvti.Vector + (riscv_vrgather_vv_vl fvti.RegClass:$rs2, + (ivti.Vector fvti.RegClass:$rs1), + fvti.RegClass:$passthru, + (fvti.Mask VMV0:$vm), + VLOpFrag)), + (!cast("PseudoVRGATHER_VV_"# fvti.LMul.MX#"_E"# fvti.SEW#"_MASK") + fvti.RegClass:$passthru, fvti.RegClass:$rs2, fvti.RegClass:$rs1, + (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; + def : Pat<(fvti.Vector (riscv_vrgather_vx_vl fvti.RegClass:$rs2, GPR:$rs1, + fvti.RegClass:$passthru, + (fvti.Mask VMV0:$vm), + VLOpFrag)), + (!cast("PseudoVRGATHER_VX_"# fvti.LMul.MX#"_MASK") + fvti.RegClass:$passthru, fvti.RegClass:$rs2, GPR:$rs1, + (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; + def : Pat<(fvti.Vector + (riscv_vrgather_vx_vl fvti.RegClass:$rs2, + uimm5:$imm, + fvti.RegClass:$passthru, + (fvti.Mask VMV0:$vm), + VLOpFrag)), + (!cast("PseudoVRGATHER_VI_"# fvti.LMul.MX#"_MASK") + fvti.RegClass:$passthru, fvti.RegClass:$rs2, uimm5:$imm, + (fvti.Mask VMV0:$vm), GPR:$vl, fvti.Log2SEW, TAIL_AGNOSTIC)>; + } } let Predicates = [HasStdExtZvfbfwma] in { @@ -97,3 +551,224 @@ let Predicates = [HasStdExtZvfbfwma] in { defm : VPatWidenFPMulAccSDNode_VV_VF_RM<"PseudoVFWMACCBF16", AllWidenableBF16ToFloatVectors>; } + +multiclass VPatConversionVI_VF_BF16 { + foreach fvti = AllBF16Vectors in { + defvar ivti = GetIntVTypeInfo.Vti; + let Predicates = !listconcat(GetVTypePredicates.Predicates, + GetVTypePredicates.Predicates) in + defm : VPatConversion; + } +} + +multiclass VPatConversionWF_VI_BF16 { + foreach vtiToWti = AllWidenableIntToBF16Vectors in { + defvar vti = vtiToWti.Vti; + defvar fwti = vtiToWti.Wti; + let Predicates = !listconcat(GetVTypePredicates.Predicates, + GetVTypePredicates.Predicates) in + defm : VPatConversion; + } +} + +multiclass VPatConversionWF_VF_BF16 { + foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in { + defvar fvti = fvtiToFWti.Vti; + defvar fwti = fvtiToFWti.Wti; + let Predicates = !listconcat(GetVTypeMinimalPredicates.Predicates, + GetVTypeMinimalPredicates.Predicates) in + defm : VPatConversion; + } +} + +multiclass VPatConversionVI_WF_BF16 { + foreach vtiToWti = AllWidenableIntToBF16Vectors in { + defvar vti = vtiToWti.Vti; + defvar fwti = vtiToWti.Wti; + let Predicates = !listconcat(GetVTypePredicates.Predicates, + GetVTypePredicates.Predicates) in + defm : VPatConversion; + } +} + +multiclass VPatConversionVI_WF_RM_BF16 { + foreach vtiToWti = AllWidenableIntToBF16Vectors in { + defvar vti = vtiToWti.Vti; + defvar fwti = vtiToWti.Wti; + let Predicates = !listconcat(GetVTypePredicates.Predicates, + GetVTypePredicates.Predicates) in + defm : VPatConversionRoundingMode; + } +} + +multiclass VPatConversionVF_WF_BF16 { + foreach fvtiToFWti = 
AllWidenableBF16ToFloatVectors in { + defvar fvti = fvtiToFWti.Vti; + defvar fwti = fvtiToFWti.Wti; + let Predicates = !listconcat(GetVTypePredicates.Predicates, + GetVTypePredicates.Predicates) in + defm : VPatConversion; + } +} + +let Predicates = [HasStdExtZvfbfa] in { +defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfadd", "PseudoVFADD_ALT", + AllBF16Vectors, isSEWAware = 1>; +defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfsub", "PseudoVFSUB_ALT", + AllBF16Vectors, isSEWAware = 1>; +defm : VPatBinaryV_VX_RM<"int_riscv_vfrsub", "PseudoVFRSUB_ALT", + AllBF16Vectors, isSEWAware = 1>; +defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwadd", "PseudoVFWADD_ALT", + AllWidenableBF16ToFloatVectors, isSEWAware=1>; +defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwsub", "PseudoVFWSUB_ALT", + AllWidenableBF16ToFloatVectors, isSEWAware=1>; +defm : VPatBinaryW_WV_WX_RM<"int_riscv_vfwadd_w", "PseudoVFWADD_ALT", + AllWidenableBF16ToFloatVectors, isSEWAware=1>; +defm : VPatBinaryW_WV_WX_RM<"int_riscv_vfwsub_w", "PseudoVFWSUB_ALT", + AllWidenableBF16ToFloatVectors, isSEWAware=1>; +defm : VPatBinaryV_VV_VX_RM<"int_riscv_vfmul", "PseudoVFMUL_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatBinaryW_VV_VX_RM<"int_riscv_vfwmul", "PseudoVFWMUL_ALT", + AllWidenableBF16ToFloatVectors, isSEWAware=1>; +defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmacc", "PseudoVFMACC_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmacc", "PseudoVFNMACC_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmsac", "PseudoVFMSAC_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmsac", "PseudoVFNMSAC_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmadd", "PseudoVFMADD_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmadd", "PseudoVFNMADD_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfmsub", "PseudoVFMSUB_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatTernaryV_VV_VX_AAXA_RM<"int_riscv_vfnmsub", "PseudoVFNMSUB_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwmacc", "PseudoVFWMACC_ALT", + AllWidenableBF16ToFloatVectors, isSEWAware=1>; +defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwnmacc", "PseudoVFWNMACC_ALT", + AllWidenableBF16ToFloatVectors, isSEWAware=1>; +defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwmsac", "PseudoVFWMSAC_ALT", + AllWidenableBF16ToFloatVectors, isSEWAware=1>; +defm : VPatTernaryW_VV_VX_RM<"int_riscv_vfwnmsac", "PseudoVFWNMSAC_ALT", + AllWidenableBF16ToFloatVectors, isSEWAware=1>; +defm : VPatUnaryV_V<"int_riscv_vfrsqrt7", "PseudoVFRSQRT7_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatUnaryV_V_RM<"int_riscv_vfrec7", "PseudoVFREC7_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatBinaryV_VV_VX<"int_riscv_vfmin", "PseudoVFMIN_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatBinaryV_VV_VX<"int_riscv_vfmax", "PseudoVFMAX_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnj", "PseudoVFSGNJ_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjn", "PseudoVFSGNJN_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatBinaryV_VV_VX<"int_riscv_vfsgnjx", "PseudoVFSGNJX_ALT", + AllBF16Vectors, isSEWAware=1>; +defm : VPatBinaryM_VV_VX<"int_riscv_vmfeq", "PseudoVMFEQ_ALT", AllBF16Vectors>; +defm : VPatBinaryM_VV_VX<"int_riscv_vmfle", "PseudoVMFLE_ALT", AllBF16Vectors>; +defm : VPatBinaryM_VV_VX<"int_riscv_vmflt", "PseudoVMFLT_ALT", 
AllBF16Vectors>; +defm : VPatBinaryM_VV_VX<"int_riscv_vmfne", "PseudoVMFNE_ALT", AllBF16Vectors>; +defm : VPatBinaryM_VX<"int_riscv_vmfgt", "PseudoVMFGT_ALT", AllBF16Vectors>; +defm : VPatBinaryM_VX<"int_riscv_vmfge", "PseudoVMFGE_ALT", AllBF16Vectors>; +defm : VPatBinarySwappedM_VV<"int_riscv_vmfgt", "PseudoVMFLT_ALT", AllBF16Vectors>; +defm : VPatBinarySwappedM_VV<"int_riscv_vmfge", "PseudoVMFLE_ALT", AllBF16Vectors>; +defm : VPatConversionVI_VF_BF16<"int_riscv_vfclass", "PseudoVFCLASS_ALT">; +foreach vti = AllBF16Vectors in { + let Predicates = GetVTypePredicates.Predicates in + defm : VPatBinaryCarryInTAIL<"int_riscv_vfmerge", "PseudoVFMERGE_ALT", + "V"#vti.ScalarSuffix#"M", + vti.Vector, + vti.Vector, vti.Scalar, vti.Mask, + vti.Log2SEW, vti.LMul, vti.RegClass, + vti.RegClass, vti.ScalarRegClass>; +} +defm : VPatConversionWF_VI_BF16<"int_riscv_vfwcvt_f_xu_v", "PseudoVFWCVT_F_XU_ALT", + isSEWAware=1>; +defm : VPatConversionWF_VI_BF16<"int_riscv_vfwcvt_f_x_v", "PseudoVFWCVT_F_X_ALT", + isSEWAware=1>; +defm : VPatConversionWF_VF_BF16<"int_riscv_vfwcvt_f_f_v", "PseudoVFWCVT_F_F_ALT", + isSEWAware=1>; +defm : VPatConversionVI_WF_RM_BF16<"int_riscv_vfncvt_xu_f_w", "PseudoVFNCVT_XU_F_ALT">; +defm : VPatConversionVI_WF_RM_BF16<"int_riscv_vfncvt_x_f_w", "PseudoVFNCVT_X_F_ALT">; +defm : VPatConversionVI_WF_BF16<"int_riscv_vfncvt_rtz_xu_f_w", "PseudoVFNCVT_RTZ_XU_F_ALT">; +defm : VPatConversionVI_WF_BF16<"int_riscv_vfncvt_rtz_x_f_w", "PseudoVFNCVT_RTZ_X_F_ALT">; +defm : VPatConversionVF_WF_RM<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F_ALT", + AllWidenableBF16ToFloatVectors, isSEWAware=1>; +defm : VPatConversionVF_WF_BF16<"int_riscv_vfncvt_rod_f_f_w", "PseudoVFNCVT_ROD_F_F_ALT", + isSEWAware=1>; +defm : VPatBinaryV_VX<"int_riscv_vfslide1up", "PseudoVFSLIDE1UP_ALT", AllBF16Vectors>; +defm : VPatBinaryV_VX<"int_riscv_vfslide1down", "PseudoVFSLIDE1DOWN_ALT", AllBF16Vectors>; + +foreach fvti = AllBF16Vectors in { + defvar ivti = GetIntVTypeInfo.Vti; + let Predicates = GetVTypePredicates.Predicates in { + // 13.16. Vector Floating-Point Move Instruction + // If we're splatting fpimm0, use vmv.v.x vd, x0. 
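+ // Note: +0.0 is the all-zeros bit pattern in bf16, so the splat can be
+ // materialized by an integer move that never touches FP state or the
+ // alt-format mode, e.g.
+ //   vmv.v.i vd, 0   (same bits as vfmv.v.f vd, +0.0)
+ // which is why the pattern below selects PseudoVMV_V_I rather than a
+ // PseudoVFMV_V_ALT pseudo.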
+ def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl + fvti.Vector:$passthru, (fvti.Scalar (fpimm0)), VLOpFrag)), + (!cast("PseudoVMV_V_I_"#fvti.LMul.MX) + $passthru, 0, GPR:$vl, fvti.Log2SEW, TU_MU)>; + def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl + fvti.Vector:$passthru, (fvti.Scalar (SelectScalarFPAsInt (XLenVT GPR:$imm))), VLOpFrag)), + (!cast("PseudoVMV_V_X_"#fvti.LMul.MX) + $passthru, GPR:$imm, GPR:$vl, fvti.Log2SEW, TU_MU)>; + } + + let Predicates = GetVTypePredicates.Predicates in { + def : Pat<(fvti.Vector (riscv_vfmv_v_f_vl + fvti.Vector:$passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), VLOpFrag)), + (!cast("PseudoVFMV_V_ALT_" # fvti.ScalarSuffix # "_" # + fvti.LMul.MX) + $passthru, (fvti.Scalar fvti.ScalarRegClass:$rs2), + GPR:$vl, fvti.Log2SEW, TU_MU)>; + } +} + +foreach vti = NoGroupBF16Vectors in { + let Predicates = GetVTypePredicates.Predicates in { + def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$passthru), + (vti.Scalar (fpimm0)), + VLOpFrag)), + (PseudoVMV_S_X $passthru, (XLenVT X0), GPR:$vl, vti.Log2SEW)>; + def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$passthru), + (vti.Scalar (SelectScalarFPAsInt (XLenVT GPR:$imm))), + VLOpFrag)), + (PseudoVMV_S_X $passthru, GPR:$imm, GPR:$vl, vti.Log2SEW)>; + def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$passthru), + vti.ScalarRegClass:$rs1, + VLOpFrag)), + (!cast("PseudoVFMV_S_"#vti.ScalarSuffix#"_ALT") + vti.RegClass:$passthru, + (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>; + } + + defvar vfmv_f_s_inst = !cast(!strconcat("PseudoVFMV_", + vti.ScalarSuffix, + "_S_ALT")); + // Only pattern-match extract-element operations where the index is 0. Any + // other index will have been custom-lowered to slide the vector correctly + // into place. + let Predicates = GetVTypePredicates.Predicates in + def : Pat<(vti.Scalar (extractelt (vti.Vector vti.RegClass:$rs2), 0)), + (vfmv_f_s_inst vti.RegClass:$rs2, vti.Log2SEW)>; +} +} // Predicates = [HasStdExtZvfbfa] diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.h b/llvm/lib/Target/RISCV/RISCVSubtarget.h index 7dffa63d85505..a58d138f3e230 100644 --- a/llvm/lib/Target/RISCV/RISCVSubtarget.h +++ b/llvm/lib/Target/RISCV/RISCVSubtarget.h @@ -288,9 +288,12 @@ class RISCVSubtarget : public RISCVGenSubtargetInfo { bool hasVInstructionsI64() const { return HasStdExtZve64x; } bool hasVInstructionsF16Minimal() const { return HasStdExtZvfhmin; } bool hasVInstructionsF16() const { return HasStdExtZvfh; } - bool hasVInstructionsBF16Minimal() const { return HasStdExtZvfbfmin; } + bool hasVInstructionsBF16Minimal() const { + return HasStdExtZvfbfmin || HasStdExtZvfbfa; + } bool hasVInstructionsF32() const { return HasStdExtZve32f; } bool hasVInstructionsF64() const { return HasStdExtZve64d; } + bool hasVInstructionsBF16() const { return HasStdExtZvfbfa; } // F16 and F64 both require F32. 
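+ // With this change the "minimal" predicate is satisfied by either
+ // extension, while hasVInstructionsBF16() gates the new BF16 arithmetic.
+ // Illustrative use only (not part of this patch):
+ //   if (ST.hasVInstructionsBF16())             // full bf16 arithmetic
+ //     ... select the *_ALT pseudos ...
+ //   else if (ST.hasVInstructionsBF16Minimal()) // conversions only
+ //     ... widen to f32 and operate there ...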
bool hasVInstructionsAnyF() const { return hasVInstructionsF32(); } bool hasVInstructionsFullMultiply() const { return HasStdExtV; } diff --git a/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll b/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll new file mode 100644 index 0000000000000..145fc5dee4b4c --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/mixed-float-bf16-arith.ll @@ -0,0 +1,186 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfadd.nxv1f16.nxv1f16( + , + , + , + iXLen, iXLen); + +declare @llvm.riscv.vadd.nxv1i32.nxv1i32( + , + , + , + iXLen); + +declare @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen); + +define @test_half_bf16( %0, %1, iXLen %2, %3, %4, ptr %ptr) nounwind { +; CHECK-LABEL: test_half_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a2, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v10, v10, v11 +; CHECK-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: fsrm a2 +; CHECK-NEXT: vse16.v v10, (a1) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv1f16.nxv1f16( + poison, + %3, + %4, + iXLen 0, iXLen %2) + + %b = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + call void @llvm.riscv.vse( %a, ptr %ptr, iXLen %2) + + ret %b +} + +define @test_i32_bf16( %0, %1, iXLen %2, %3, %4, ptr %ptr) nounwind { +; CHECK-LABEL: test_i32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vadd.vv v10, v10, v11 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: vse32.v v10, (a1) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vadd.nxv1i32.nxv1i32( + poison, + %3, + %4, + iXLen %2) + + %b = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + call void @llvm.riscv.vse( %a, ptr %ptr, iXLen %2) + + ret %b +} + +define @test_half_bf16_half( %0, %1, iXLen %2, %3, %4, ptr %ptr) nounwind { +; CHECK-LABEL: test_half_bf16_half: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a2, 0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v10, v10, v11 +; CHECK-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v9, v10, v11 +; CHECK-NEXT: fsrm a2 +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vse16.v v9, (a1) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv1f16.nxv1f16( + poison, + %3, + %4, + iXLen 0, iXLen %2) + + %b = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + %c = call @llvm.riscv.vfadd.nxv1f16.nxv1f16( + poison, + %a, + %4, + iXLen 0, iXLen %2) + + store %c, ptr %ptr + + ret %b +} + +define @test_bf16_half_bf16( %0, %1, iXLen %2, %3, %4, ptr %ptr) nounwind { +; CHECK-LABEL: test_bf16_half_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a2, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma +; 
CHECK-NEXT: vfadd.vv v10, v10, v11 +; CHECK-NEXT: vsetvli zero, zero, e16alt, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: fsrm a2 +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vse16.v v10, (a1) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + %b = call @llvm.riscv.vfadd.nxv1f16.nxv1f16( + poison, + %3, + %4, + iXLen 0, iXLen %2) + + %c = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( + poison, + %a, + %1, + iXLen 0, iXLen %2) + + store %b, ptr %ptr + + ret %c +} + +define @test_bf16_i16( %0, %1, iXLen %2, %3, %4, ptr %ptr) nounwind { +; CHECK-LABEL: test_bf16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a2, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: vadd.vv v9, v10, v11 +; CHECK-NEXT: fsrm a2 +; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma +; CHECK-NEXT: vse16.v v9, (a1) +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + %b = call @llvm.riscv.vadd.nxv1i16.nxv1i16( + poison, + %3, + %4, + iXLen %2) + + store %b, ptr %ptr + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll new file mode 100644 index 0000000000000..db1b081258d5f --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-bf.ll @@ -0,0 +1,607 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfadd_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv1bf16.nxv1bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfadd_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv2bf16.nxv2bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define 
@intrinsic_vfadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfadd_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv4bf16.nxv4bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfadd_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv8bf16.nxv8bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfadd_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv16bf16.nxv16bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 
0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv32bf16.nxv32bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfadd_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfadd.vv v8, v8, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv32bf16.nxv32bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu +; CHECK-NEXT: vfadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv32bf16.nxv32bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv1bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv1bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv2bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv2bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = 
call @llvm.riscv.vfadd.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv4bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv4bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv8bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv8bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfadd.vf v8, v10, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv16bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfadd_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv16bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfadd.vf v8, v12, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfadd.nxv32bf16.bf16( + , + , + bfloat, + iXLen, 
iXLen); + +define @intrinsic_vfadd_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfadd_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfadd.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.nxv32bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfadd.mask.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfadd_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfadd.vf v8, v16, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfadd.mask.nxv32bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll new file mode 100644 index 0000000000000..d7d49b379b5a4 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfclass-bf.ll @@ -0,0 +1,294 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfclass.nxv1i16.nxv1bf16( + , + , + iXLen); + +define @intrinsic_vfclass_v_nxv1i16_nxv1bf16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfclass.v v8, v8 +; CHECK-NEXT: ret + %0, + iXLen %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv1i16.nxv1bf16( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv1i16.nxv1bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfclass_mask_v_nxv1i16_nxv1bf16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfclass.v v8, v9, v0.t +; CHECK-NEXT: ret + %0, + %1, + %2, + iXLen %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv1i16.nxv1bf16( + %0, + %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv2i16.nxv2bf16( + , + , + iXLen); + +define @intrinsic_vfclass_v_nxv2i16_nxv2bf16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv2i16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfclass.v v8, v8 +; CHECK-NEXT: ret + %0, + iXLen %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv2i16.nxv2bf16( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv2i16.nxv2bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfclass_mask_v_nxv2i16_nxv2bf16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfclass.v v8, v9, v0.t +; CHECK-NEXT: ret + %0, + %1, + %2, + iXLen %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv2i16.nxv2bf16( + %0, + %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} + +declare 
@llvm.riscv.vfclass.nxv4i16.nxv4bf16( + , + , + iXLen); + +define @intrinsic_vfclass_v_nxv4i16_nxv4bf16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv4i16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfclass.v v8, v8 +; CHECK-NEXT: ret + %0, + iXLen %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv4i16.nxv4bf16( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv4i16.nxv4bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfclass_mask_v_nxv4i16_nxv4bf16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfclass.v v8, v9, v0.t +; CHECK-NEXT: ret + %0, + %1, + %2, + iXLen %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv4i16.nxv4bf16( + %0, + %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv8i16.nxv8bf16( + , + , + iXLen); + +define @intrinsic_vfclass_v_nxv8i16_nxv8bf16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv8i16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfclass.v v8, v8 +; CHECK-NEXT: ret + %0, + iXLen %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv8i16.nxv8bf16( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv8i16.nxv8bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfclass_mask_v_nxv8i16_nxv8bf16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfclass.v v8, v10, v0.t +; CHECK-NEXT: ret + %0, + %1, + %2, + iXLen %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv8i16.nxv8bf16( + %0, + %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv16i16.nxv16bf16( + , + , + iXLen); + +define @intrinsic_vfclass_v_nxv16i16_nxv16bf16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv16i16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfclass.v v8, v8 +; CHECK-NEXT: ret + %0, + iXLen %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv16i16.nxv16bf16( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv16i16.nxv16bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfclass_mask_v_nxv16i16_nxv16bf16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfclass.v v8, v12, v0.t +; CHECK-NEXT: ret + %0, + %1, + %2, + iXLen %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv16i16.nxv16bf16( + %0, + %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfclass.nxv32i16.nxv32bf16( + , + , + iXLen); + +define @intrinsic_vfclass_v_nxv32i16_nxv32bf16( +; CHECK-LABEL: intrinsic_vfclass_v_nxv32i16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfclass.v v8, v8 +; CHECK-NEXT: ret + %0, + iXLen %1) nounwind { +entry: + %a = call @llvm.riscv.vfclass.nxv32i16.nxv32bf16( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfclass.mask.nxv32i16.nxv32bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfclass_mask_v_nxv32i16_nxv32bf16( +; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, tu, mu +; CHECK-NEXT: vfclass.v v8, v16, v0.t +; CHECK-NEXT: ret + %0, + 
%1, + %2, + iXLen %3) nounwind { +entry: + %a = call @llvm.riscv.vfclass.mask.nxv32i16.nxv32bf16( + %0, + %1, + %2, + iXLen %3, iXLen 0) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll new file mode 100644 index 0000000000000..0937a82b48580 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc-bf.ll @@ -0,0 +1,506 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfmacc.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfmacc.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfmacc.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + iXLen 0, 
iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfmacc.vv v8, v10, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfmacc.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfmacc.vv v8, v12, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfmacc.vv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv1bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfmacc.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv1bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, 
iXLen); + +define @intrinsic_vfmacc_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv2bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfmacc.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv2bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv4bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfmacc.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv4bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfmacc.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.mask.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.nxv8bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmacc_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfmacc.vf v8, fa0, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmacc.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmacc.mask.nxv8bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmacc_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; 
CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfmacc.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmacc.mask.nxv8bf16.bf16(
+    <vscale x 8 x bfloat> %0,
+    bfloat %1,
+    <vscale x 8 x bfloat> %2,
+    <vscale x 8 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 0)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16(
+  <vscale x 16 x bfloat>,
+  bfloat,
+  <vscale x 16 x bfloat>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmacc_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfmacc.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.nxv16bf16.bf16(
+    <vscale x 16 x bfloat> %0,
+    bfloat %1,
+    <vscale x 16 x bfloat> %2,
+    iXLen 0, iXLen %3, iXLen 0)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16(
+  <vscale x 16 x bfloat>,
+  bfloat,
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmacc_mask_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfmacc.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmacc.mask.nxv16bf16.bf16(
+    <vscale x 16 x bfloat> %0,
+    bfloat %1,
+    <vscale x 16 x bfloat> %2,
+    <vscale x 16 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 0)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll
new file mode 100644
index 0000000000000..795fd4f56ec2c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-bf.ll
@@ -0,0 +1,506 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfmadd.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    <vscale x 1 x bfloat> %2,
+    iXLen 0, iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    <vscale x 1 x bfloat> %2,
+    <vscale x 1 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 0);
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfmadd.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    <vscale x 2 x bfloat> %2,
+    iXLen 0, iXLen %3, iXLen 0)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    <vscale x 2 x bfloat> %2,
+    <vscale x 2 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 0);
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmadd.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.nxv4bf16(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x bfloat> %1,
+    <vscale x 4 x bfloat> %2,
+    iXLen 0, iXLen %3, iXLen 0)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfmadd.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.nxv4bf16(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x bfloat> %1,
+    <vscale x 4 x bfloat> %2,
+    <vscale x 4 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 0);
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfmadd.vv v8, v10, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.nxv8bf16(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x bfloat> %1,
+    <vscale x 8 x bfloat> %2,
+    iXLen 0, iXLen %3, iXLen 0)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfmadd.vv v8, v10, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.nxv8bf16(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x bfloat> %1,
+    <vscale x 8 x bfloat> %2,
+    <vscale x 8 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 0);
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfmadd.vv v8, v12, v16
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.nxv16bf16(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x bfloat> %1,
+    <vscale x 16 x bfloat> %2,
+    iXLen 0, iXLen %3, iXLen 0)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfmadd.vv v8, v12, v16, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.nxv16bf16(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x bfloat> %1,
+    <vscale x 16 x bfloat> %2,
+    <vscale x 16 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 0);
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16(
+  <vscale x 1 x bfloat>,
+  bfloat,
+  <vscale x 1 x bfloat>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmadd_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfmadd.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.nxv1bf16.bf16(
+    <vscale x 1 x bfloat> %0,
+    bfloat %1,
+    <vscale x 1 x bfloat> %2,
+    iXLen 0, iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16(
+  <vscale x 1 x bfloat>,
+  bfloat,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfmadd_mask_vf_nxv1bf16_bf16_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1bf16_bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmadd.mask.nxv1bf16.bf16(
+    <vscale x 1 x bfloat> %0,
+    bfloat %1,
+    <vscale x 1 x bfloat> %2,
+    <vscale x 1 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 0);
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16(
+  <vscale x 2 x bfloat>,
+  bfloat,
+  <vscale x 2 x bfloat>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmadd_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfmadd.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.nxv2bf16.bf16(
+    <vscale x 2 x bfloat> %0,
+    bfloat %1,
+    <vscale x 2 x bfloat> %2,
+    iXLen 0, iXLen %3, iXLen 0)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16(
+  <vscale x 2 x bfloat>,
+  bfloat,
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfmadd_mask_vf_nxv2bf16_bf16_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2bf16_bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmadd.mask.nxv2bf16.bf16(
+    <vscale x 2 x bfloat> %0,
+    bfloat %1,
+    <vscale x 2 x bfloat> %2,
+    <vscale x 2 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 0);
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16(
+  <vscale x 4 x bfloat>,
+  bfloat,
+  <vscale x 4 x bfloat>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmadd_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT: vfmadd.vf v8, fa0, v9
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.nxv4bf16.bf16(
+    <vscale x 4 x bfloat> %0,
+    bfloat %1,
+    <vscale x 4 x bfloat> %2,
+    iXLen 0, iXLen %3, iXLen 0)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16(
+  <vscale x 4 x bfloat>,
+  bfloat,
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfmadd_mask_vf_nxv4bf16_bf16_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, <vscale x 4 x bfloat> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4bf16_bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmadd.mask.nxv4bf16.bf16(
+    <vscale x 4 x bfloat> %0,
+    bfloat %1,
+    <vscale x 4 x bfloat> %2,
+    <vscale x 4 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 0);
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16(
+  <vscale x 8 x bfloat>,
+  bfloat,
+  <vscale x 8 x bfloat>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmadd_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma
+; CHECK-NEXT: vfmadd.vf v8, fa0, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.nxv8bf16.bf16(
+    <vscale x 8 x bfloat> %0,
+    bfloat %1,
+    <vscale x 8 x bfloat> %2,
+    iXLen 0, iXLen %3, iXLen 0)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16(
+  <vscale x 8 x bfloat>,
+  bfloat,
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfmadd_mask_vf_nxv8bf16_bf16_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, <vscale x 8 x bfloat> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8bf16_bf16_nxv8bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmadd.mask.nxv8bf16.bf16(
+    <vscale x 8 x bfloat> %0,
+    bfloat %1,
+    <vscale x 8 x bfloat> %2,
+    <vscale x 8 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 0);
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16(
+  <vscale x 16 x bfloat>,
+  bfloat,
+  <vscale x 16 x bfloat>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmadd_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT: vfmadd.vf v8, fa0, v12
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.nxv16bf16.bf16(
+    <vscale x 16 x bfloat> %0,
+    bfloat %1,
+    <vscale x 16 x bfloat> %2,
+    iXLen 0, iXLen %3, iXLen 0)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16(
+  <vscale x 16 x bfloat>,
+  bfloat,
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfmadd_mask_vf_nxv16bf16_bf16_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16bf16_bf16_nxv16bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vfmadd.mask.nxv16bf16.bf16(
+    <vscale x 16 x bfloat> %0,
+    bfloat %1,
+    <vscale x 16 x bfloat> %2,
+    <vscale x 16 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 0);
+
+  ret <vscale x 16 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll
new file mode 100644
index 0000000000000..a337d3061ce78
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-bf.ll
@@ -0,0 +1,571 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare @llvm.riscv.vfmax.nxv1bf16.nxv1bf16( + , + , + , + iXLen); + +define @intrinsic_vfmax_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmax_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv1bf16.nxv1bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv2bf16.nxv2bf16( + , + , + , + iXLen); + +define @intrinsic_vfmax_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmax_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv2bf16.nxv2bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv4bf16.nxv4bf16( + , + , + , + iXLen); + +define @intrinsic_vfmax_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmax_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv4bf16.nxv4bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfmax.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv8bf16.nxv8bf16( + , + , + , + iXLen); + +define @intrinsic_vfmax_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmax_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv8bf16.nxv8bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, 
%2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfmax.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv16bf16.nxv16bf16( + , + , + , + iXLen); + +define @intrinsic_vfmax_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmax_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv16bf16.nxv16bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfmax.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv32bf16.nxv32bf16( + , + , + , + iXLen); + +define @intrinsic_vfmax_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmax_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfmax.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv32bf16.nxv32bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu +; CHECK-NEXT: vfmax.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv32bf16.nxv32bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv1bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmax_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmax_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv1bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv2bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmax_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; 
CHECK-LABEL: intrinsic_vfmax_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv2bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv4bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmax_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmax_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv4bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfmax.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv8bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmax_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmax_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv8bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfmax.vf v8, v10, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv16bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmax_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmax_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv16bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfmax.vf v8, v12, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmax.nxv32bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmax_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmax_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfmax.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.nxv32bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmax.mask.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmax_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmax_mask_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfmax.vf v8, v16, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmax.mask.nxv32bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll new file mode 100644 index 0000000000000..86ba7c7fb7fe6 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmerge-bf.ll @@ -0,0 +1,258 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfmerge.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vfmerge_vfm_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv1bf16.bf16( + poison, + %0, + bfloat %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vfmerge_vfm_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv2bf16.bf16( + poison, + %0, + bfloat %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vfmerge_vfm_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv4bf16.bf16( + poison, + %0, + bfloat %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vfmerge_vfm_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vfmerge_vfm_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv8bf16.bf16( + poison, + %0, + bfloat %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vfmerge_vfm_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv16bf16.bf16( + poison, + %0, + bfloat %1, + %2, + iXLen %3) + + ret %a +} + +declare @llvm.riscv.vfmerge.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vfmerge_vfm_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vfm_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv32bf16.bf16( + poison, + %0, + bfloat %1, + %2, + iXLen %3) + + ret %a +} + +define @intrinsic_vfmerge_vzm_nxv1bf16_nxv1bf16_bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x fa5, zero +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv1bf16.bf16( + poison, + %0, + bfloat zeroinitializer, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vfmerge_vzm_nxv2bf16_nxv2bf16_bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x fa5, zero +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv2bf16.bf16( + poison, + %0, + bfloat zeroinitializer, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vfmerge_vzm_nxv4bf16_nxv4bf16_bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x fa5, zero +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv4bf16.bf16( + poison, + %0, + bfloat zeroinitializer, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vfmerge_vzm_nxv8bf16_nxv8bf16_bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x fa5, zero +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv8bf16.bf16( + poison, + %0, + bfloat zeroinitializer, + %1, + iXLen %2) + + ret %a +} + +define @intrinsic_vfmerge_vzm_nxv16bf16_nxv16bf16_bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x fa5, zero +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv16bf16.bf16( + poison, + %0, + bfloat zeroinitializer, + %1, + iXLen 
%2) + + ret %a +} + +define @intrinsic_vfmerge_vzm_nxv32bf16_nxv32bf16_bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmerge_vzm_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fmv.h.x fa5, zero +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfmerge.vfm v8, v8, fa5, v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmerge.nxv32bf16.bf16( + poison, + %0, + bfloat zeroinitializer, + %1, + iXLen %2) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll new file mode 100644 index 0000000000000..37c0cf506a6fa --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-bf.ll @@ -0,0 +1,571 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfmin.nxv1bf16.nxv1bf16( + , + , + , + iXLen); + +define @intrinsic_vfmin_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv1bf16.nxv1bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv2bf16.nxv2bf16( + , + , + , + iXLen); + +define @intrinsic_vfmin_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv2bf16.nxv2bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv4bf16.nxv4bf16( + , + , + , + iXLen); + +define @intrinsic_vfmin_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv4bf16.nxv4bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare 
@llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfmin.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv8bf16.nxv8bf16( + , + , + , + iXLen); + +define @intrinsic_vfmin_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv8bf16.nxv8bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfmin.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv16bf16.nxv16bf16( + , + , + , + iXLen); + +define @intrinsic_vfmin_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv16bf16.nxv16bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfmin.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv32bf16.nxv32bf16( + , + , + , + iXLen); + +define @intrinsic_vfmin_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfmin.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv32bf16.nxv32bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu +; CHECK-NEXT: vfmin.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv32bf16.nxv32bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare 
@llvm.riscv.vfmin.nxv1bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmin_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv1bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv2bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmin_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv2bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv4bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmin_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv4bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfmin.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv8bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmin_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv8bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define 
@intrinsic_vfmin_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfmin.vf v8, v10, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv16bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmin_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv16bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfmin.vf v8, v12, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmin.nxv32bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfmin_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmin_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfmin.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.nxv32bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmin.mask.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfmin_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmin_mask_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfmin.vf v8, v16, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmin.mask.nxv32bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll new file mode 100644 index 0000000000000..ebeda9eaf42c2 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac-bf.ll @@ -0,0 +1,506 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfmsac.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + iXLen 0, 
iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfmsac.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfmsac.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfmsac.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfmsac.vv v8, v10, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define 
@intrinsic_vfmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfmsac.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfmsac.vv v8, v12, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfmsac.vv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv1bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfmsac.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv1bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv2bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfmsac.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv2bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vfmsac_mask_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv4bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfmsac.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv4bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv8bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfmsac.vf v8, fa0, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv8bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsac.nxv16bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfmsac.vf v8, fa0, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsac.mask.nxv16bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsac_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; 
CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfmsac.vf v8, fa0, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsac.mask.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll new file mode 100644 index 0000000000000..b032c1d66f3b9 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-bf.ll @@ -0,0 +1,506 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfmsub.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfmsub.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfmsub.vv v8, v9, v10 +; 
CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfmsub.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfmsub.vv v8, v10, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfmsub.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfmsub.vv v8, v12, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfmsub.vv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv1bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + 
iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv1bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv2bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv2bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv4bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv4bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv8bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfmsub.vf v8, fa0, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv8bf16.bf16( + , + bfloat, + , + , + 
iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfmsub.nxv16bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfmsub.vf v8, fa0, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfmsub.mask.nxv16bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmsub_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmsub.mask.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll new file mode 100644 index 0000000000000..44bce723c39d4 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-bf.ll @@ -0,0 +1,607 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfmul.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfmul_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv1bf16.nxv1bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfmul_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) 
nounwind { +; CHECK-LABEL: intrinsic_vfmul_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv2bf16.nxv2bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfmul_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv4bf16.nxv4bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfmul.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfmul_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv8bf16.nxv8bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfmul_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v12 +; 
CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv16bf16.nxv16bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfmul.vv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv32bf16.nxv32bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfmul_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfmul.vv v8, v8, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv32bf16.nxv32bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu +; CHECK-NEXT: vfmul.vv v8, v16, v24, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv32bf16.nxv32bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv1bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfmul_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv1bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv2bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfmul_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv2bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret 
%a +} + +declare @llvm.riscv.vfmul.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv4bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfmul_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv4bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfmul.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv8bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfmul_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv8bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfmul.vf v8, v10, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv16bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfmul_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv16bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) 
nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfmul.vf v8, v12, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv32bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfmul_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmul_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfmul.vf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.nxv32bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfmul_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfmul.vf v8, v16, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmul.mask.nxv32bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll new file mode 100644 index 0000000000000..fbc7311945c8b --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv-bf-s.ll @@ -0,0 +1,88 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+d,+v,+experimental-zvfbfa -target-abi lp64d -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv32 -mattr=+d,+v,+experimental-zvfbfa -target-abi ilp32d -verify-machineinstrs < %s | FileCheck %s + +declare bfloat @llvm.riscv.vfmv.f.s.nxv1bf16() + +define bfloat @intrinsic_vfmv.f.s_s_nxv1bf16( %0) nounwind { +; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: fmv.h.x fa0, a0 +; CHECK-NEXT: ret +entry: + %a = call bfloat @llvm.riscv.vfmv.f.s.nxv1bf16( %0) + ret bfloat %a +} + +declare bfloat @llvm.riscv.vfmv.f.s.nxv2bf16() + +define bfloat @intrinsic_vfmv.f.s_s_nxv2bf16( %0) nounwind { +; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: fmv.h.x fa0, a0 +; CHECK-NEXT: ret +entry: + %a = call bfloat @llvm.riscv.vfmv.f.s.nxv2bf16( %0) + ret bfloat %a +} + +declare bfloat @llvm.riscv.vfmv.f.s.nxv4bf16() + +define bfloat @intrinsic_vfmv.f.s_s_nxv4bf16( %0) nounwind { +; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; CHECK-NEXT: vmv.x.s a0, v8 +; CHECK-NEXT: fmv.h.x fa0, a0 +; CHECK-NEXT: ret +entry: + %a = call bfloat @llvm.riscv.vfmv.f.s.nxv4bf16( %0) + ret bfloat %a +} + +declare bfloat @llvm.riscv.vfmv.f.s.nxv8bf16() + +define bfloat @intrinsic_vfmv.f.s_s_nxv8bf16( %0) nounwind { +; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma +; CHECK-NEXT: vmv.x.s a0, 
v8
+; CHECK-NEXT:    fmv.h.x fa0, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call bfloat @llvm.riscv.vfmv.f.s.nxv8bf16(<vscale x 8 x bfloat> %0)
+  ret bfloat %a
+}
+
+declare bfloat @llvm.riscv.vfmv.f.s.nxv16bf16(<vscale x 16 x bfloat>)
+
+define bfloat @intrinsic_vfmv.f.s_s_nxv16bf16(<vscale x 16 x bfloat> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    fmv.h.x fa0, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call bfloat @llvm.riscv.vfmv.f.s.nxv16bf16(<vscale x 16 x bfloat> %0)
+  ret bfloat %a
+}
+
+declare bfloat @llvm.riscv.vfmv.f.s.nxv32bf16(<vscale x 32 x bfloat>)
+
+define bfloat @intrinsic_vfmv.f.s_s_nxv32bf16(<vscale x 32 x bfloat> %0) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:    fmv.h.x fa0, a0
+; CHECK-NEXT:    ret
+entry:
+  %a = call bfloat @llvm.riscv.vfmv.f.s.nxv32bf16(<vscale x 32 x bfloat> %0)
+  ret bfloat %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll
new file mode 100644
index 0000000000000..a810809fca515
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv-s-bf.ll
@@ -0,0 +1,161 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16(<vscale x 1 x bfloat>, bfloat, iXLen)
+
+define <vscale x 1 x bfloat> @intrinsic_vfmv.s.f_f_nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT:    vfmv.s.f v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vfmv.s.f.nxv1bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2)
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfmv.s.f.nxv2bf16(<vscale x 2 x bfloat>, bfloat, iXLen)
+
+define <vscale x 2 x bfloat> @intrinsic_vfmv.s.f_f_nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT:    vfmv.s.f v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vfmv.s.f.nxv2bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2)
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfmv.s.f.nxv4bf16(<vscale x 4 x bfloat>, bfloat, iXLen)
+
+define <vscale x 4 x bfloat> @intrinsic_vfmv.s.f_f_nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT:    vfmv.s.f v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vfmv.s.f.nxv4bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2)
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfmv.s.f.nxv8bf16(<vscale x 8 x bfloat>, bfloat, iXLen)
+
+define <vscale x 8 x bfloat> @intrinsic_vfmv.s.f_f_nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT:    vfmv.s.f v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vfmv.s.f.nxv8bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2)
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfmv.s.f.nxv16bf16(<vscale x 16 x bfloat>, bfloat, iXLen)
+
+define <vscale x 16 x bfloat> @intrinsic_vfmv.s.f_f_nxv16bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, tu, ma
+; CHECK-NEXT:    vfmv.s.f v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> 
@llvm.riscv.vfmv.s.f.nxv16bf16( %0, bfloat %1, iXLen %2) + ret %a +} + +declare @llvm.riscv.vfmv.s.f.nxv32bf16(, bfloat, iXLen) + +define @intrinsic_vfmv.s.f_f_nxv32bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfmv.s.f v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.s.f.nxv32bf16( %0, bfloat %1, iXLen %2) + ret %a +} + +define @intrinsic_vfmv.s.f_f_zero_nxv1bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vmv.s.x v8, zero +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.s.f.nxv1bf16( %0, bfloat 0.0, iXLen %1) + ret %a +} + +define @intrinsic_vfmv.s.f_f_zero_nxv2bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vmv.s.x v8, zero +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.s.f.nxv2bf16( %0, bfloat 0.0, iXLen %1) + ret %a +} + +define @intrinsic_vfmv.s.f_f_zero_nxv4bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vmv.s.x v8, zero +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.s.f.nxv4bf16( %0, bfloat 0.0, iXLen %1) + ret %a +} + +define @intrinsic_vfmv.s.f_f_zero_nxv8bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vmv.s.x v8, zero +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.s.f.nxv8bf16( %0, bfloat 0.0, iXLen %1) + ret %a +} + +define @intrinsic_vfmv.s.f_f_zero_nxv16bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vmv.s.x v8, zero +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.s.f.nxv16bf16( %0, bfloat 0.0, iXLen %1) + ret %a +} + +define @intrinsic_vfmv.s.f_f_zero_nxv32bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vmv.s.x v8, zero +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.s.f.nxv32bf16( %0, bfloat 0.0, iXLen %1) + ret %a +} + +define @intrinsic_vfmv.s.f_f_nxv1bf16_negzero( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1bf16_negzero: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lui a1, 1048568 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma +; CHECK-NEXT: vmv.s.x v8, a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.s.f.nxv1bf16( %0, bfloat -0.0, iXLen %1) + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll new file mode 100644 index 0000000000000..f3293ddc83ef9 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmv-v-bf.ll @@ -0,0 +1,216 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare 
@llvm.riscv.vfmv.v.f.nxv1bf16( + , + bfloat, + iXLen); + +define @intrinsic_vfmv.v.f_f_nxv1bf16(bfloat %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv1bf16( + poison, + bfloat %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv2bf16( + , + bfloat, + iXLen); + +define @intrinsic_vfmv.v.f_f_nxv2bf16(bfloat %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv2bf16( + poison, + bfloat %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv4bf16( + , + bfloat, + iXLen); + +define @intrinsic_vfmv.v.f_f_nxv4bf16(bfloat %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv4bf16( + poison, + bfloat %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv8bf16( + , + bfloat, + iXLen); + +define @intrinsic_vfmv.v.f_f_nxv8bf16(bfloat %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv8bf16( + poison, + bfloat %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv16bf16( + , + bfloat, + iXLen); + +define @intrinsic_vfmv.v.f_f_nxv16bf16(bfloat %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv16bf16( + poison, + bfloat %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfmv.v.f.nxv32bf16( + , + bfloat, + iXLen); + +define @intrinsic_vfmv.v.f_f_nxv32bf16(bfloat %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfmv.v.f_f_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfmv.v.f v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv32bf16( + poison, + bfloat %0, + iXLen %1) + + ret %a +} + +define @intrinsic_vfmv.v.f_zero_nxv1bf16(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vfmv.v.f_zero_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv1bf16( + poison, + bfloat 0.0, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv.v.i_zero_nxv2bf16(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv2bf16( + poison, + bfloat 0.0, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv.v.i_zero_nxv4bf16(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv4bf16( + poison, + bfloat 0.0, + iXLen %0) + + ret %a +} + +define 
@intrinsic_vmv.v.i_zero_nxv8bf16(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv8bf16( + poison, + bfloat 0.0, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv.v.i_zero_nxv16bf16(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv16bf16( + poison, + bfloat 0.0, + iXLen %0) + + ret %a +} + +define @intrinsic_vmv.v.i_zero_nxv32bf16(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmv.v.i_zero_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfmv.v.f.nxv32bf16( + poison, + bfloat 0.0, + iXLen %0) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll new file mode 100644 index 0000000000000..7d587fd55cd83 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-bf-f.ll @@ -0,0 +1,226 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32( + , + , + iXLen); + +define @intrinsic_vfncvt_rod.f.f.w_nxv1bf16_nxv1f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1bf16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1bf16.nxv1f32( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1bf16_nxv1f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1bf16_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfncvt.rod.f.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1bf16.nxv1f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32( + , + , + iXLen); + +define @intrinsic_vfncvt_rod.f.f.w_nxv2bf16_nxv2f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2bf16_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2bf16.nxv2f32( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2bf16_nxv2f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2bf16_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfncvt.rod.f.f.w v8, v9, v0.t +; 
CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2bf16.nxv2f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32( + , + , + iXLen); + +define @intrinsic_vfncvt_rod.f.f.w_nxv4bf16_nxv4f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4bf16_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4bf16.nxv4f32( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4bf16_nxv4f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4bf16_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfncvt.rod.f.f.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4bf16.nxv4f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32( + , + , + iXLen); + +define @intrinsic_vfncvt_rod.f.f.w_nxv8bf16_nxv8f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8bf16_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8bf16.nxv8f32( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8bf16_nxv8f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8bf16_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfncvt.rod.f.f.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8bf16.nxv8f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32( + , + , + iXLen); + +define @intrinsic_vfncvt_rod.f.f.w_nxv16bf16_nxv16f32( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv16bf16_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfncvt.rod.f.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16bf16.nxv16f32( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv16bf16_nxv16f32( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv16bf16_nxv16f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfncvt.rod.f.f.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16bf16.nxv16f32( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll new file mode 100644 index 0000000000000..ee9e3d1b9f630 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-bf.ll @@ -0,0 +1,270 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; 
RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma +; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1bf16( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu +; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1bf16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma +; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2bf16( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu +; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2bf16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma +; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4bf16( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu +; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4bf16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8bf16: +; CHECK: # 
%bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, ma
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v10, v8
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8bf16(
+    <vscale x 8 x i8> poison,
+    <vscale x 8 x bfloat> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16(
+  <vscale x 8 x i8>,
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8bf16(<vscale x 8 x i8> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m1, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8bf16(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x bfloat> %1,
+    <vscale x 8 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16(
+  <vscale x 16 x i8>,
+  <vscale x 16 x bfloat>,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, ma
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v8
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16bf16(
+    <vscale x 16 x i8> poison,
+    <vscale x 16 x bfloat> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16(
+  <vscale x 16 x i8>,
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16bf16(<vscale x 16 x i8> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m2, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16bf16(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x bfloat> %1,
+    <vscale x 16 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16(
+  <vscale x 32 x i8>,
+  <vscale x 32 x bfloat>,
+  iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m4, ta, ma
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v16, v8
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32bf16(
+    <vscale x 32 x i8> poison,
+    <vscale x 32 x bfloat> %0,
+    iXLen %1)
+
+  ret <vscale x 32 x i8> %a
+}
+
+declare <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16(
+  <vscale x 32 x i8>,
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x i8> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32bf16(<vscale x 32 x i8> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e8alt, m4, ta, mu
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32bf16(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x bfloat> %1,
+    <vscale x 32 x i1> %2,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x i8> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll
new file mode 100644
index 0000000000000..521f7274dc5c9
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-bf.ll
@@ -0,0 +1,270 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16(
+  <vscale x 1 x i8>,
+  <vscale x 1 x bfloat>,
+  iXLen);
+
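The declaration just above shows the operand layout these narrowing conversions share: a passthru vector, the wide bf16 source, and the vector length. A minimal, self-contained sketch of invoking it from plain IR follows; it is illustrative only, not part of the patch, and the function name @demo_vfncvt_rtz_xu plus the hard-coded i64 XLEN are assumptions for a riscv64 target.

; Illustrative sketch only -- not part of the generated test file.
; Assumes riscv64 (iXLen == i64); compile with, e.g.:
;   llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa demo.ll
declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16(
  <vscale x 1 x i8>,        ; passthru (poison selects "no merge")
  <vscale x 1 x bfloat>,    ; wide 16-bit bf16 source (2*SEW for an e8 result)
  i64)                      ; active vector length

define <vscale x 1 x i8> @demo_vfncvt_rtz_xu(<vscale x 1 x bfloat> %src, i64 %vl) {
  ; rtz conversions carry no rounding-mode operand, unlike the
  ; vfncvt.x/xu tests elsewhere in this patch that pass iXLen 0 and
  ; bracket the instruction with fsrmi/fsrm.
  %r = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16(
    <vscale x 1 x i8> poison,
    <vscale x 1 x bfloat> %src,
    i64 %vl)
  ret <vscale x 1 x i8> %r
}

The masked variants add a mask and a trailing policy operand; the iXLen 1 passed in the masked tests appears to select the tail-agnostic policy, which matches the ta, mu in the generated vsetvli lines.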
+define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma +; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1bf16( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu +; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1bf16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma +; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2bf16( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu +; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2bf16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma +; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4bf16( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu +; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4bf16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma +; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8bf16( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16( + , + , + , 
+ iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu +; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8bf16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma +; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16bf16( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, mu +; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16bf16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16( + , + , + iXLen); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma +; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32bf16( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu +; CHECK-NEXT: vfncvt.rtz.xu.f.w v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32bf16( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll new file mode 100644 index 0000000000000..ab9ebade287e6 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-bf.ll @@ -0,0 +1,288 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma +; CHECK-NEXT: vfncvt.x.f.w v9, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; 
CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1bf16( + poison, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i8_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma +; CHECK-NEXT: vfncvt.x.f.w v9, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2bf16( + poison, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i8_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma +; CHECK-NEXT: vfncvt.x.f.w v9, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4bf16( + poison, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i8_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma +; CHECK-NEXT: vfncvt.x.f.w v10, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8bf16( + poison, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define 
@intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i8_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma +; CHECK-NEXT: vfncvt.x.f.w v12, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16bf16( + poison, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv32i8_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma +; CHECK-NEXT: vfncvt.x.f.w v16, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32bf16( + poison, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu +; CHECK-NEXT: vfncvt.x.f.w v8, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll new file mode 100644 index 0000000000000..61c6803ce12bd --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-bf.ll @@ -0,0 +1,288 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1bf16: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma +; CHECK-NEXT: vfncvt.xu.f.w v9, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1bf16( + poison, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma +; CHECK-NEXT: vfncvt.xu.f.w v9, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2bf16( + poison, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma +; CHECK-NEXT: vfncvt.xu.f.w v9, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4bf16( + poison, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma +; CHECK-NEXT: vfncvt.xu.f.w v10, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8bf16( + poison, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma +; CHECK-NEXT: vfncvt.xu.f.w v12, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16bf16( + poison, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma +; CHECK-NEXT: vfncvt.xu.f.w v16, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32bf16( + poison, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu +; CHECK-NEXT: vfncvt.xu.f.w v8, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll new file mode 100644 index 0000000000000..4bf643003a38c --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc-bf.ll @@ -0,0 +1,506 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare 
@llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfnmacc.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfnmacc.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfnmacc.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfnmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define 
@intrinsic_vfnmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfnmacc.vv v8, v10, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfnmacc.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfnmacc.vv v8, v12, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfnmacc.vv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv1bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv2bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind 
{ +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv4bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv8bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfnmacc.vf v8, fa0, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmacc.nxv16bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfnmacc.vf v8, fa0, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmacc_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfnmacc.vf v8, fa0, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmacc.mask.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll new file mode 100644 index 0000000000000..7dcaa1c24e6de --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-bf.ll @@ -0,0 +1,506 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: 
vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfnmadd.vv v8, v10, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfnmadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfnmadd.vv v8, v12, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfnmadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: 
fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv1bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv2bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv4bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv4bf16.bf16( + %0, 
+ bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv8bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmadd.nxv16bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmadd_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmadd.mask.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll new file mode 100644 index 0000000000000..9528f80453398 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac-bf.ll @@ -0,0 +1,506 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfnmsac.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + 
ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfnmsac.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfnmsac.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfnmsac.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfnmsac.vv v8, v10, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define 
@intrinsic_vfnmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfnmsac.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfnmsac.vv v8, v12, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfnmsac.vv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv1bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv2bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen 
%4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv4bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv8bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfnmsac.vf v8, fa0, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfnmsac.vf v8, fa0, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.mask.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsac.nxv16bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfnmsac.vf v8, fa0, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsac.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsac_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16bf16_bf16_nxv16bf16: 
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT: vfnmsac.vf v8, fa0, v12, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vfnmsac.mask.nxv16bf16.bf16(
+    <vscale x 16 x bfloat> %0,
+    bfloat %1,
+    <vscale x 16 x bfloat> %2,
+    <vscale x 16 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 0);
+
+  ret <vscale x 16 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll
new file mode 100644
index 0000000000000..dcbb9ced92db7
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-bf.ll
@@ -0,0 +1,506 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma
+; CHECK-NEXT: vfnmsub.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.nxv1bf16.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    <vscale x 1 x bfloat> %2,
+    iXLen 0, iXLen %3, iXLen 0)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfnmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT: vfnmsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vfnmsub.mask.nxv1bf16.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    <vscale x 1 x bfloat> %2,
+    <vscale x 1 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 0);
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma
+; CHECK-NEXT: vfnmsub.vv v8, v9, v10
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.nxv2bf16.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    <vscale x 2 x bfloat> %2,
+    iXLen 0, iXLen %3, iXLen 0)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfnmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT: vfnmsub.vv v8, v9, v10, v0.t
+; CHECK-NEXT: fsrm a1
+; CHECK-NEXT: ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vfnmsub.mask.nxv2bf16.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    <vscale x 2 x bfloat> %2,
+    <vscale x 2 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 0);
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfnmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4bf16_nxv4bf16_nxv4bf16:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: fsrmi a1, 0
+; 
CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfnmsub.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfnmsub.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfnmsub.vv v8, v10, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfnmsub.vv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfnmsub.vv v8, v12, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfnmsub.vv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsub.nxv1bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; 
CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_mask_vf_nxv1bf16_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1bf16_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.mask.nxv1bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsub.nxv2bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_mask_vf_nxv2bf16_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2bf16_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.mask.nxv2bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsub.nxv4bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_mask_vf_nxv4bf16_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4bf16_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.mask.nxv4bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsub.nxv8bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfnmsub.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_mask_vf_nxv8bf16_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8bf16_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.mask.nxv8bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + +declare @llvm.riscv.vfnmsub.nxv16bf16.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfnmsub_mask_vf_nxv16bf16_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16bf16_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfnmsub.mask.nxv16bf16.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0); + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll new file mode 100644 index 0000000000000..1211415ffe432 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfrec7-bf.ll @@ -0,0 +1,282 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfrec7.nxv1bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfrec7_v_nxv1bf16_nxv1bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_v_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfrec7.v v8, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.nxv1bf16( + poison, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfrec7.mask.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfrec7_mask_v_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfrec7.v v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.mask.nxv1bf16( + %1, + %2, + %0, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrec7.nxv2bf16( + , + , + iXLen, iXLen); + 
+define @intrinsic_vfrec7_v_nxv2bf16_nxv2bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_v_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfrec7.v v8, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.nxv2bf16( + poison, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfrec7.mask.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfrec7_mask_v_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfrec7.v v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.mask.nxv2bf16( + %1, + %2, + %0, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrec7.nxv4bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfrec7_v_nxv4bf16_nxv4bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_v_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfrec7.v v8, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.nxv4bf16( + poison, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfrec7.mask.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfrec7_mask_v_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfrec7.v v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.mask.nxv4bf16( + %1, + %2, + %0, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrec7.nxv8bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfrec7_v_nxv8bf16_nxv8bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_v_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfrec7.v v8, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.nxv8bf16( + poison, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfrec7.mask.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfrec7_mask_v_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfrec7.v v8, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.mask.nxv8bf16( + %1, + %2, + %0, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrec7.nxv16bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfrec7_v_nxv16bf16_nxv16bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_v_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfrec7.v v8, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.nxv16bf16( + poison, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfrec7.mask.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define 
@intrinsic_vfrec7_mask_v_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfrec7.v v8, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.mask.nxv16bf16( + %1, + %2, + %0, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrec7.nxv32bf16( + , + , + iXLen, iXLen); + +define @intrinsic_vfrec7_v_nxv32bf16_nxv32bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_v_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfrec7.v v8, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.nxv32bf16( + poison, + %0, + iXLen 0, iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfrec7.mask.nxv32bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfrec7_mask_v_nxv32bf16_nxv32bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfrec7.v v8, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrec7.mask.nxv32bf16( + %1, + %2, + %0, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll new file mode 100644 index 0000000000000..4626b865ab454 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfrsqrt7-bf16.ll @@ -0,0 +1,264 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfrsqrt7.nxv1bf16( + , + , + iXLen); + +define @intrinsic_vfrsqrt7_v_nxv1bf16_nxv1bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfrsqrt7.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsqrt7.nxv1bf16( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfrsqrt7.mask.nxv1bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfrsqrt7_mask_v_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfrsqrt7.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsqrt7.mask.nxv1bf16( + %1, + %2, + %0, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrsqrt7.nxv2bf16( + , + , + iXLen); + +define @intrinsic_vfrsqrt7_v_nxv2bf16_nxv2bf16( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfrsqrt7.v v8, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsqrt7.nxv2bf16( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfrsqrt7.mask.nxv2bf16( + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfrsqrt7_mask_v_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) 
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv2bf16(
+    <vscale x 2 x bfloat> %1,
+    <vscale x 2 x bfloat> %2,
+    <vscale x 2 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfrsqrt7_v_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT:    vfrsqrt7.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.nxv4bf16(
+    <vscale x 4 x bfloat> poison,
+    <vscale x 4 x bfloat> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfrsqrt7_mask_v_nxv4bf16_nxv4bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv4bf16(
+    <vscale x 4 x bfloat> %1,
+    <vscale x 4 x bfloat> %2,
+    <vscale x 4 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfrsqrt7_v_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT:    vfrsqrt7.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.nxv8bf16(
+    <vscale x 8 x bfloat> poison,
+    <vscale x 8 x bfloat> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfrsqrt7_mask_v_nxv8bf16_nxv8bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT:    vfrsqrt7.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv8bf16(
+    <vscale x 8 x bfloat> %1,
+    <vscale x 8 x bfloat> %2,
+    <vscale x 8 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfrsqrt7_v_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT:    vfrsqrt7.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.nxv16bf16(
+    <vscale x 16 x bfloat> poison,
+    <vscale x 16 x bfloat> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfrsqrt7_mask_v_nxv16bf16_nxv16bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT:    vfrsqrt7.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv16bf16(
+    <vscale x 16 x bfloat> %1,
+    <vscale x 16 x bfloat> %2,
+    <vscale x 16 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16(
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x bfloat>,
+  iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfrsqrt7_v_nxv32bf16_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_v_nxv32bf16_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT:    vfrsqrt7.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.nxv32bf16(
+    <vscale x 32 x bfloat> poison,
+    <vscale x 32 x bfloat> %0,
+    iXLen %1)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16(
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfrsqrt7_mask_v_nxv32bf16_nxv32bf16(<vscale x 32 x i1> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv32bf16_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT:    vfrsqrt7.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vfrsqrt7.mask.nxv32bf16(
+    <vscale x 32 x bfloat> %1,
+    <vscale x 32 x bfloat> %2,
+    <vscale x 32 x i1> %0,
+    iXLen %3, iXLen 1)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll
new file mode 100644
index 0000000000000..54a6d48cfcc5b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfrsub-bf.ll
@@ -0,0 +1,282 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  bfloat,
+  iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfrsub_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.nxv1bf16.bf16(
+    <vscale x 1 x bfloat> poison,
+    <vscale x 1 x bfloat> %0,
+    bfloat %1,
+    iXLen 7, iXLen %2)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  bfloat,
+  <vscale x 1 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfrsub_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT:    vfrsub.vf v8, v9, fa0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vfrsub.mask.nxv1bf16.bf16(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    bfloat %2,
+    <vscale x 1 x i1> %3,
+    iXLen 7, iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  bfloat,
+  iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfrsub_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.nxv2bf16.bf16(
+    <vscale x 2 x bfloat> poison,
+    <vscale x 2 x bfloat> %0,
+    bfloat %1,
+    iXLen 7, iXLen %2)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  bfloat,
+  <vscale x 2 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfrsub_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT:    vfrsub.vf v8, v9, fa0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vfrsub.mask.nxv2bf16.bf16(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    bfloat %2,
+    <vscale x 2 x i1> %3,
+    iXLen 7, iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  bfloat,
+  iXLen, iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfrsub_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT:    vfrsub.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vfrsub.nxv4bf16.bf16(
+    <vscale x 4 x bfloat> poison,
+    <vscale x 4 x bfloat> %0,
+    bfloat %1,
+    iXLen 7, iXLen %2)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare @llvm.riscv.vfrsub.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfrsub_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsub.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv8bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfrsub_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsub.nxv8bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfrsub_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v10, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsub.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv16bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfrsub_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsub.nxv16bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfrsub_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v12, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsub.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfrsub.nxv32bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfrsub_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfrsub.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfrsub.nxv32bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfrsub.mask.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfrsub_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfrsub.vf v8, v16, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfrsub.mask.nxv32bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll new file mode 100644 index 0000000000000..2cd698d9aaa3c --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnj-bf.ll @@ -0,0 +1,571 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnj_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfsgnj.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv1bf16.nxv1bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnj_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfsgnj.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv2bf16.nxv2bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnj_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfsgnj.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv4bf16.nxv4bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v9, v10, v0.t 
+; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnj_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfsgnj.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv8bf16.nxv8bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnj_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfsgnj.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv16bf16.nxv16bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnj_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfsgnj.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv32bf16.nxv32bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu +; CHECK-NEXT: vfsgnj.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv32bf16.nxv32bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv1bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnj_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + 
%a = call @llvm.riscv.vfsgnj.nxv1bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv2bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnj_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv2bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv4bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnj_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv4bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv8bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnj_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv8bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v10, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv8bf16.bf16( + 
%0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv16bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnj_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv16bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v12, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnj.nxv32bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnj_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfsgnj.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.nxv32bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnj_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfsgnj.vf v8, v16, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnj.mask.nxv32bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll new file mode 100644 index 0000000000000..08340becc9ed4 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjn-bf.ll @@ -0,0 +1,571 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjn_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv1bf16.nxv1bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, 
v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjn_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv2bf16.nxv2bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjn_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfsgnjn.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv4bf16.nxv4bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfsgnjn.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjn_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfsgnjn.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv8bf16.nxv8bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfsgnjn.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjn_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfsgnjn.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfsgnjn.nxv16bf16.nxv16bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfsgnjn.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjn_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfsgnjn.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv32bf16.nxv32bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu +; CHECK-NEXT: vfsgnjn.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.nxv32bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv1bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjn_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv1bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv2bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjn_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv2bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t 
+; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv4bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjn_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv4bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv8bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjn_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv8bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v10, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv16bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjn_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.nxv16bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v12, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.nxv32bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjn_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfsgnjn.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: 
+ %a = call @llvm.riscv.vfsgnjn.nxv32bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjn_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfsgnjn.vf v8, v16, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjn.mask.nxv32bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll new file mode 100644 index 0000000000000..e51a42e2b8cea --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfsgnjx-bf.ll @@ -0,0 +1,571 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjx_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv1bf16.nxv1bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjx_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv2bf16.nxv2bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjx_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, 
m1, ta, ma +; CHECK-NEXT: vfsgnjx.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv4bf16.nxv4bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfsgnjx.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjx_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfsgnjx.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv8bf16.nxv8bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfsgnjx.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjx_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfsgnjx.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv16bf16.nxv16bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfsgnjx.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16( + , + , + , + iXLen); + +define @intrinsic_vfsgnjx_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfsgnjx.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv32bf16.nxv32bf16( + poison, + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16( + , + , + , + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: 
vsetvli zero, a1, e16alt, m8, ta, mu +; CHECK-NEXT: vfsgnjx.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.nxv32bf16( + %0, + %1, + %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv1bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjx_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv1bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv2bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjx_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv2bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv4bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjx_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv4bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv8bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjx_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma 
+; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv8bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v10, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv16bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjx_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv16bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v12, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.nxv32bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfsgnjx_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfsgnjx.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.nxv32bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfsgnjx_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfsgnjx.vf v8, v16, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsgnjx.mask.nxv32bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll new file mode 100644 index 0000000000000..c65719c3a4c1a --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1down-bf.ll @@ -0,0 +1,288 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfslide1down.nxv1bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfslide1down_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; 
CHECK-LABEL: intrinsic_vfslide1down_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1down.nxv1bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfslide1down_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1down.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv2bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfslide1down_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1down.nxv2bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfslide1down_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1down.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv4bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfslide1down_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1down.nxv4bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, + iXLen); + +define @intrinsic_vfslide1down_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfslide1down.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1down.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfslide1down.nxv8bf16.bf16( + , + , + bfloat, + iXLen); + +define @intrinsic_vfslide1down_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfslide1down.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfslide1down.nxv8bf16.bf16( + poison, + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16( + , + 
<vscale x 8 x bfloat>,
+  bfloat,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfslide1down_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT:    vfslide1down.vf v8, v10, fa0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1down.mask.nxv8bf16.bf16(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x bfloat> %1,
+    bfloat %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfslide1down_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.nxv16bf16.bf16(
+    <vscale x 16 x bfloat> poison,
+    <vscale x 16 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  bfloat,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfslide1down_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT:    vfslide1down.vf v8, v12, fa0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1down.mask.nxv16bf16.bf16(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x bfloat> %1,
+    bfloat %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16(
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfslide1down_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT:    vfslide1down.vf v8, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.nxv32bf16.bf16(
+    <vscale x 32 x bfloat> poison,
+    <vscale x 32 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16(
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x bfloat>,
+  bfloat,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfslide1down_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT:    vfslide1down.vf v8, v16, fa0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1down.mask.nxv32bf16.bf16(
+    <vscale x 32 x bfloat> %0,
+    <vscale x 32 x bfloat> %1,
+    bfloat %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll
new file mode 100644
index 0000000000000..57a48986fdfcd
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfslide1up-bf.ll
@@ -0,0 +1,294 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfslide1up_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT:    vfslide1up.vf v9, v8, fa0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.nxv1bf16.bf16(
+    <vscale x 1 x bfloat> poison,
+    <vscale x 1 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  bfloat,
+  <vscale x 1 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfslide1up_mask_vf_nxv1bf16_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1bf16_nxv1bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.vfslide1up.mask.nxv1bf16.bf16(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    bfloat %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfslide1up_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT:    vfslide1up.vf v9, v8, fa0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.nxv2bf16.bf16(
+    <vscale x 2 x bfloat> poison,
+    <vscale x 2 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  bfloat,
+  <vscale x 2 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 2 x bfloat> @intrinsic_vfslide1up_mask_vf_nxv2bf16_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2bf16_nxv2bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.vfslide1up.mask.nxv2bf16.bf16(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    bfloat %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfslide1up_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT:    vfslide1up.vf v9, v8, fa0
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.nxv4bf16.bf16(
+    <vscale x 4 x bfloat> poison,
+    <vscale x 4 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  bfloat,
+  <vscale x 4 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 4 x bfloat> @intrinsic_vfslide1up_mask_vf_nxv4bf16_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4bf16_nxv4bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.vfslide1up.mask.nxv4bf16.bf16(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x bfloat> %1,
+    bfloat %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfslide1up_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT:    vfslide1up.vf v10, v8, fa0
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.nxv8bf16.bf16(
+    <vscale x 8 x bfloat> poison,
+    <vscale x 8 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  bfloat,
+  <vscale x 8 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 8 x bfloat> @intrinsic_vfslide1up_mask_vf_nxv8bf16_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8bf16_nxv8bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT:    vfslide1up.vf v8, v10, fa0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.vfslide1up.mask.nxv8bf16.bf16(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x bfloat> %1,
+    bfloat %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfslide1up_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT:    vfslide1up.vf v12, v8, fa0
+; CHECK-NEXT:    vmv.v.v v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.nxv16bf16.bf16(
+    <vscale x 16 x bfloat> poison,
+    <vscale x 16 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  bfloat,
+  <vscale x 16 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 16 x bfloat> @intrinsic_vfslide1up_mask_vf_nxv16bf16_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16bf16_nxv16bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT:    vfslide1up.vf v8, v12, fa0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.vfslide1up.mask.nxv16bf16.bf16(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x bfloat> %1,
+    bfloat %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16(
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfslide1up_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT:    vfslide1up.vf v16, v8, fa0
+; CHECK-NEXT:    vmv.v.v v8, v16
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.nxv32bf16.bf16(
+    <vscale x 32 x bfloat> poison,
+    <vscale x 32 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
+declare <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16(
+  <vscale x 32 x bfloat>,
+  <vscale x 32 x bfloat>,
+  bfloat,
+  <vscale x 32 x i1>,
+  iXLen,
+  iXLen);
+
+define <vscale x 32 x bfloat> @intrinsic_vfslide1up_mask_vf_nxv32bf16_nxv32bf16_bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, bfloat %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32bf16_nxv32bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m8, ta, mu
+; CHECK-NEXT:    vfslide1up.vf v8, v16, fa0, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.vfslide1up.mask.nxv32bf16.bf16(
+    <vscale x 32 x bfloat> %0,
+    <vscale x 32 x bfloat> %1,
+    bfloat %2,
+    <vscale x 32 x i1> %3,
+    iXLen %4, iXLen 1)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll
new file mode 100644
index 0000000000000..aea75211b70b5
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-bf.ll
@@ -0,0 +1,559 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x bfloat> @llvm.riscv.vfsub.nxv1bf16.nxv1bf16(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  iXLen, iXLen);
+
+define <vscale x 1 x bfloat> @intrinsic_vfsub_vv_nxv1bf16_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfsub_vv_nxv1bf16_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT:    vfsub.vv v8,
v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv1bf16.nxv1bf16( + poison, + %0, + %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1bf16_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfsub_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv2bf16.nxv2bf16( + poison, + %0, + %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2bf16_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfsub_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfsub.vv v8, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv4bf16.nxv4bf16( + poison, + %0, + %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4bf16_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfsub.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfsub_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfsub.vv v8, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv8bf16.nxv8bf16( + poison, + %0, + %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8bf16_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v10, v12, v0.t +; 
CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfsub_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfsub.vv v8, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv16bf16.nxv16bf16( + poison, + %0, + %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16bf16_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfsub.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv32bf16.nxv32bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfsub_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfsub.vv v8, v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv32bf16.nxv32bf16( + poison, + %0, + %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32bf16_nxv32bf16_nxv32bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16alt, m8, ta, mu +; CHECK-NEXT: vfsub.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv32bf16.nxv32bf16( + %0, + %1, + %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv1bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfsub_vf_nxv1bf16_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv1bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vf_nxv1bf16_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1bf16_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv2bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfsub_vf_nxv2bf16_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, 
mf2, ta, ma +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv2bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vf_nxv2bf16_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2bf16_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv4bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfsub_vf_nxv4bf16_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv4bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vf_nxv4bf16_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4bf16_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfsub.vf v8, v9, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv8bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfsub_vf_nxv8bf16_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv8bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vf_nxv8bf16_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8bf16_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfsub.vf v8, v10, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv16bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfsub_vf_nxv16bf16_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv16bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vf_nxv16bf16_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16bf16_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfsub.vf v8, v12, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfsub.nxv32bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfsub_vf_nxv32bf16_nxv32bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfsub_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, ma +; CHECK-NEXT: vfsub.vf v8, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.nxv32bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 7, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfsub.mask.nxv32bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfsub_mask_vf_nxv32bf16_nxv32bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32bf16_nxv32bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m8, ta, mu +; CHECK-NEXT: vfsub.vf v8, v16, fa0, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfsub.mask.nxv32bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 7, iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll new file mode 100644 index 0000000000000..62feac824efad --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-bf.ll @@ -0,0 +1,519 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwadd_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwadd.vv v10, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.nxv1bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwadd_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwadd.vv v10, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfwadd.nxv2f32.nxv2bf16.nxv2bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwadd_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v9 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwadd.vv v8, v11, v10 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.nxv4bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwadd.vv v8, v10, v11, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwadd_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmv2r.v v12, v10 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwadd.vv v8, v14, v12 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.nxv8bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwadd.vv v8, v12, v14, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwadd_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v20, v8 +; CHECK-NEXT: fsrmi a0, 0 +; 
CHECK-NEXT: vfwadd.vv v8, v20, v16 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.nxv16bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwadd.vv v8, v16, v20, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwadd_vf_nxv1f32_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwadd.vf v9, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv1f32.nxv1bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd_mask_vf_nxv1f32_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv1f32.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwadd_vf_nxv2f32_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwadd.vf v9, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv2f32.nxv2bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd_mask_vf_nxv2f32_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv2f32.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwadd_vf_nxv4f32_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: 
fsrmi a0, 0 +; CHECK-NEXT: vfwadd.vf v8, v10, fa0 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv4f32.nxv4bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd_mask_vf_nxv4f32_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v10, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv4f32.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwadd_vf_nxv8f32_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwadd.vf v8, v12, fa0 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv8f32.nxv8bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd_mask_vf_nxv8f32_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v12, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv8f32.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwadd_vf_nxv16f32_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f32_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwadd.vf v8, v16, fa0 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.nxv16f32.nxv16bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd_mask_vf_nxv16f32_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f32_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwadd.vf v8, v16, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.mask.nxv16f32.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll new file mode 100644 index 0000000000000..69c9a4ea75c97 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-w-bf.ll @@ -0,0 +1,773 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 
's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f32_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwadd.wv v8, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwadd.wv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f32_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwadd.wv v8, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwadd.wv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f32_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfwadd.wv v8, v8, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwadd.wv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f32_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfwadd.wv v8, v8, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwadd.wv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f32_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfwadd.wv v8, v8, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv16f32.nxv16bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl4re16.v v24, (a0) +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vsetvli zero, a1, e16alt, m4, ta, mu +; CHECK-NEXT: vfwadd.wv v8, v16, v24, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv1f32.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_nxv1f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv1f32.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_nxv1f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + 
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  bfloat,
+  iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_bf16(<vscale x 2 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_nxv2f32_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.nxv2f32.bf16(
+    <vscale x 2 x float> poison,
+    <vscale x 2 x float> %0,
+    bfloat %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>,
+  bfloat,
+  <vscale x 2 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_bf16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_nxv2f32_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT:    vfwadd.wf v8, v9, fa0, v0.t
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    bfloat %2,
+    <vscale x 2 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  bfloat,
+  iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_bf16(<vscale x 4 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_nxv4f32_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.nxv4f32.bf16(
+    <vscale x 4 x float> poison,
+    <vscale x 4 x float> %0,
+    bfloat %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>,
+  bfloat,
+  <vscale x 4 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_bf16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_nxv4f32_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT:    vfwadd.wf v8, v10, fa0, v0.t
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    bfloat %2,
+    <vscale x 4 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  bfloat,
+  iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_bf16(<vscale x 8 x float> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_nxv8f32_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT:    vfwadd.wf v8, v8, fa0
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.nxv8f32.bf16(
+    <vscale x 8 x float> poison,
+    <vscale x 8 x float> %0,
+    bfloat %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>,
+  bfloat,
+  <vscale x 8 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_bf16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_nxv8f32_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT:    vfwadd.wf v8, v12, fa0, v0.t
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    bfloat %2,
+    <vscale x 8 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.nxv16f32.bf16(
+  <vscale x 16 x float>,
+  <vscale x 16 x float>,
+  bfloat,
+  iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_bf16(<vscale x 16 x float> %0, bfloat %1, iXLen
%2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_nxv16f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfwadd.wf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv16f32.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_nxv16f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v16, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwadd.wv v8, v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.nxv1bf16( + %0, + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwadd.wv v8, v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.nxv2bf16( + %0, + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwadd.wv v8, v8, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.nxv4bf16( + %0, + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwadd.wv v8, v8, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.nxv8bf16( + %0, + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwadd.wv v8, v8, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.nxv16bf16( + %0, + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_bf16( %0, 
bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv1f32_nxv1f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.bf16( + %0, + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv2f32_nxv2f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.bf16( + %0, + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv4f32_nxv4f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.bf16( + %0, + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv8f32_nxv8f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.bf16( + %0, + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_tie_nxv16f32_nxv16f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwadd.wf v8, v8, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.bf16( + %0, + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv1f32_nxv1f32_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwadd.wv v9, v9, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv1f32.nxv1bf16( + poison, + %1, + %0, + iXLen 0, iXLen %2) + + ret %a +} + +define @intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv2f32_nxv2f32_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwadd.wv v9, v9, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv2f32.nxv2bf16( + poison, + %1, + %0, + iXLen 0, iXLen %2) + + ret %a +} + +define @intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vfwadd.w_wv_untie_nxv4f32_nxv4f32_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfwadd.wv v10, v10, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv4f32.nxv4bf16( + poison, + %1, + %0, + iXLen 0, iXLen %2) + + ret %a +} + +define @intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwadd.w_wv_untie_nxv8f32_nxv8f32_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfwadd.wv v12, v12, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwadd.w.nxv8f32.nxv8bf16( + poison, + %1, + %0, + iXLen 0, iXLen %2) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll new file mode 100644 index 0000000000000..b7df45bad36e6 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-x.ll @@ -0,0 +1,264 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv1bf16_nxv1i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv1bf16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv1bf16.nxv1i8( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv1bf16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1bf16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv1bf16.nxv1i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv2bf16_nxv2i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv2bf16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv2bf16.nxv2i8( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv2bf16_nxv2i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv2bf16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv2bf16.nxv2i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8( + , + , + iXLen); + +define 
@intrinsic_vfwcvt_f.x.v_nxv4bf16_nxv4i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv4bf16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma +; CHECK-NEXT: vfwcvt.f.x.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv4bf16.nxv4i8( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv4bf16_nxv4i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv4bf16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv4bf16.nxv4i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv8bf16_nxv8i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv8bf16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vfwcvt.f.x.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv8bf16.nxv8i8( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv8bf16_nxv8i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv8bf16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv8bf16.nxv8i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv16bf16_nxv16i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv16bf16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vfwcvt.f.x.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv16bf16.nxv16i8( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv16bf16_nxv16i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv16bf16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv16bf16.nxv16i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.x.v_nxv32bf16_nxv32i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.x.v_nxv32bf16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: vfwcvt.f.x.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.nxv32bf16.nxv32i8( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.x.v_nxv32bf16_nxv32i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vfwcvt_mask_f.x.v_nxv32bf16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.x.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.x.v.mask.nxv32bf16.nxv32i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll new file mode 100644 index 0000000000000..c370261a77bc0 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwcvt-bf-xu.ll @@ -0,0 +1,264 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv1bf16_nxv1i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1bf16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, ma +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv1bf16.nxv1i8( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv1bf16_nxv1i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv1bf16_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf8, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1bf16.nxv1i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv2bf16_nxv2i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv2bf16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, ma +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv2bf16.nxv2i8( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv2bf16_nxv2i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv2bf16_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2bf16.nxv2i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv4bf16_nxv4i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv4bf16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, ma +; CHECK-NEXT: vfwcvt.f.xu.v v9, v8 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv4bf16.nxv4i8( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv4bf16_nxv4i8( %0, %1, %2, iXLen %3) nounwind { +; 
CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv4bf16_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, mf2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4bf16.nxv4i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv8bf16_nxv8i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv8bf16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: vfwcvt.f.xu.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv8bf16.nxv8i8( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv8bf16_nxv8i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv8bf16_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m1, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v10, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8bf16.nxv8i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv16bf16_nxv16i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv16bf16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, ma +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: vfwcvt.f.xu.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv16bf16.nxv16i8( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv16bf16_nxv16i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv16bf16_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m2, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v12, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16bf16.nxv16i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8( + , + , + iXLen); + +define @intrinsic_vfwcvt_f.xu.v_nxv32bf16_nxv32i8( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_f.xu.v_nxv32bf16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, ma +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: vfwcvt.f.xu.v v8, v16 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.nxv32bf16.nxv32i8( + poison, + %0, + iXLen %1) + + ret %a +} + +declare @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwcvt_mask_f.xu.v_nxv32bf16_nxv32i8( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwcvt_mask_f.xu.v_nxv32bf16_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e8alt, m4, ta, mu +; CHECK-NEXT: vfwcvt.f.xu.v v8, v16, v0.t +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32bf16.nxv32i8( + %0, + %1, + %2, + iXLen %3, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll new file mode 100644 index 0000000000000..a3f667818ab0a --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac-bf.ll @@ -0,0 +1,506 @@ +; NOTE: Assertions have 
been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfwmsac.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.nxv1f32.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfwmsac.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfwmsac.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.nxv2f32.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfwmsac.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfwmsac.vv v8, v10, v11 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.nxv4f32.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; 
CHECK-NEXT: vfwmsac.vv v8, v10, v11, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfwmsac.vv v8, v12, v14 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.nxv8f32.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfwmsac.vv v8, v12, v14, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfwmsac.vv v8, v16, v20 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.nxv16f32.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfwmsac.vv v8, v16, v20, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv1f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv1f32_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.nxv1f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv1f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_mask_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfwmsac.mask.nxv1f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv2f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv2f32_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfwmsac.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.nxv2f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv2f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_mask_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv2f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv4f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv4f32_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfwmsac.vf v8, fa0, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.nxv4f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv4f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_mask_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv4f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.nxv8f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv8f32_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfwmsac.vf v8, fa0, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.nxv8f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwmsac.mask.nxv8f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmsac_mask_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfwmsac.vf v8, fa0, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmsac.mask.nxv8f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + 
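Each blank operand slot in these declarations is a scalable-vector type implied by the intrinsic's name mangling (nxv8f32 is <vscale x 8 x float>, a bf16 suffix is a bfloat scalar, and masks are <vscale x N x i1>). As a hand-written, fully-typed sketch only, not part of the autogenerated checks and using a hypothetical wrapper name, the unmasked vfwmsac.vf pattern these tests exercise reads:

declare <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16(
  <vscale x 8 x float>,   ; accumulator, also the result register group
  bfloat,                 ; scalar multiplicand, passed in fa0
  <vscale x 8 x bfloat>,  ; vector multiplicand
  iXLen, iXLen, iXLen)    ; rounding mode (frm), vl, policy

; Hypothetical wrapper: frm = 0 (rne) and policy = 0 match the fsrmi 0 and
; the "tu, ma" vsetvli configuration in the checks above. iXLen is the
; placeholder that the RUN lines substitute with i32 or i64 via sed.
define <vscale x 8 x float> @sketch_vfwmsac_vf(<vscale x 8 x float> %acc, bfloat %s, <vscale x 8 x bfloat> %v, iXLen %vl) nounwind {
entry:
  %r = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.bf16(
    <vscale x 8 x float> %acc, bfloat %s, <vscale x 8 x bfloat> %v,
    iXLen 0, iXLen %vl, iXLen 0)
  ret <vscale x 8 x float> %r
}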
+declare <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16(
+  <vscale x 16 x float>,
+  bfloat,
+  <vscale x 16 x bfloat>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwmsac_vf_nxv16f32_bf16_nxv16bf16(<vscale x 16 x float> %0, bfloat %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_vf_nxv16f32_bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, tu, ma
+; CHECK-NEXT:    vfwmsac.vf v8, fa0, v16
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.bf16(
+    <vscale x 16 x float> %0,
+    bfloat %1,
+    <vscale x 16 x bfloat> %2,
+    iXLen 0, iXLen %3, iXLen 0)
+
+  ret <vscale x 16 x float> %a
+}
+
+declare <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16(
+  <vscale x 16 x float>,
+  bfloat,
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 16 x float> @intrinsic_vfwmsac_mask_vf_nxv16f32_bf16_nxv16bf16(<vscale x 16 x float> %0, bfloat %1, <vscale x 16 x bfloat> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT:    vfwmsac.vf v8, fa0, v16, v0.t
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.bf16(
+    <vscale x 16 x float> %0,
+    bfloat %1,
+    <vscale x 16 x bfloat> %2,
+    <vscale x 16 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 0)
+
+  ret <vscale x 16 x float> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll
new file mode 100644
index 0000000000000..577b93af7a918
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmul-bf.ll
@@ -0,0 +1,519 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16(
+  <vscale x 1 x float>,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwmul_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT:    vfwmul.vv v10, v8, v9
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.nxv1bf16(
+    <vscale x 1 x float> poison,
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16(
+  <vscale x 1 x float>,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x i1>,
+  iXLen, iXLen, iXLen);
+
+define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1bf16_nxv1bf16(<vscale x 1 x float> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.nxv1bf16(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x bfloat> %1,
+    <vscale x 1 x bfloat> %2,
+    <vscale x 1 x i1> %3,
+    iXLen 0, iXLen %4, iXLen 1)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16(
+  <vscale x 2 x float>,
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  iXLen, iXLen);
+
+define <vscale x 2 x float> @intrinsic_vfwmul_vv_nxv2f32_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vfwmul_vv_nxv2f32_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fsrmi a1, 0
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT:    vfwmul.vv v10, v8, v9
+; CHECK-NEXT:    fsrm a1
+; CHECK-NEXT:    vmv1r.v v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.nxv2bf16(
+    <vscale x 2 x float> poison,
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    iXLen 0, iXLen %2)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare
@llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmul_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwmul.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwmul_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v9 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwmul.vv v8, v11, v10 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.nxv4bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmul_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwmul.vv v8, v10, v11, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwmul_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmv2r.v v12, v10 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwmul.vv v8, v14, v12 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.nxv8bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmul_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwmul.vv v8, v12, v14, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwmul_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v20, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwmul.vv v8, v20, v16 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfwmul.nxv16f32.nxv16bf16.nxv16bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmul_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwmul.vv v8, v16, v20, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwmul_vf_nxv1f32_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv1f32_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwmul.vf v9, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.nxv1f32.nxv1bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmul_mask_vf_nxv1f32_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv1f32_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv1f32.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwmul_vf_nxv2f32_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv2f32_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwmul.vf v9, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.nxv2f32.nxv2bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmul_mask_vf_nxv2f32_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv2f32_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv2f32.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwmul_vf_nxv4f32_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv4f32_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwmul.vf v8, v10, fa0 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: 
+ %a = call @llvm.riscv.vfwmul.nxv4f32.nxv4bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmul_mask_vf_nxv4f32_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv4f32_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v10, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv4f32.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwmul_vf_nxv8f32_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv8f32_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwmul.vf v8, v12, fa0 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.nxv8f32.nxv8bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmul_mask_vf_nxv8f32_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv8f32_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v12, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv8f32.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwmul_vf_nxv16f32_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_vf_nxv16f32_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwmul.vf v8, v16, fa0 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.nxv16f32.nxv16bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwmul_mask_vf_nxv16f32_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwmul_mask_vf_nxv16f32_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwmul.vf v8, v16, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwmul.mask.nxv16f32.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll new file mode 100644 index 0000000000000..1e05e4c7acf25 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-bf.ll @@ -0,0 +1,506 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs 
-target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.nxv1f32.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfwnmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfwnmacc.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.nxv2f32.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfwnmacc.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfwnmacc.vv v8, v10, v11 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.nxv4f32.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfwnmacc.vv v8, v10, v11, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfwnmacc.vv v8, v12, v14 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.nxv8f32.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfwnmacc.vv v8, v12, v14, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfwnmacc.vv v8, v16, v20 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.nxv16f32.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfwnmacc.vv v8, v16, v20, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv1f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv1f32_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.nxv1f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_mask_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv1f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, 
iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv2f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv2f32_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.nxv2f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_mask_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv2f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv4f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv4f32_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.nxv4f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_mask_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv4f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv8f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv8f32_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.nxv8f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_mask_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv8f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.nxv16f32.bf16( + , + 
bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_vf_nxv16f32_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.nxv16f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmacc_mask_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfwnmacc.vf v8, fa0, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmacc.mask.nxv16f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll new file mode 100644 index 0000000000000..223ad4f7483f6 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-bf.ll @@ -0,0 +1,506 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfwnmsac.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.nxv1f32.nxv1bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfwnmsac.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfwnmsac.vv v8, v9, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.nxv2f32.nxv2bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define 
@intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfwnmsac.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfwnmsac.vv v8, v10, v11 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.nxv4f32.nxv4bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfwnmsac.vv v8, v10, v11, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfwnmsac.vv v8, v12, v14 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.nxv8f32.nxv8bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfwnmsac.vv v8, v12, v14, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16( + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfwnmsac.vv v8, v16, v20 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.nxv16f32.nxv16bf16( + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; 
CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfwnmsac.vv v8, v16, v20, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv1f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv1f32_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, ma +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.nxv1f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_mask_vf_nxv1f32_bf16_nxv1bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv1f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv2f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv2f32_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, ma +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.nxv2f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_mask_vf_nxv2f32_bf16_nxv2bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv2f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv4f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv4f32_bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, ma +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.nxv4f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_mask_vf_nxv4f32_bf16_nxv4bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_bf16_nxv4bf16: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv4f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv8f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv8f32_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, ma +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.nxv8f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_mask_vf_nxv8f32_bf16_nxv8bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv8f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.nxv16f32.bf16( + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_vf_nxv16f32_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, ma +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.nxv16f32.bf16( + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 0) + + ret %a +} + +declare @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16( + , + bfloat, + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwnmsac_mask_vf_nxv16f32_bf16_nxv16bf16( %0, bfloat %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, tu, mu +; CHECK-NEXT: vfwnmsac.vf v8, fa0, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwnmsac.mask.nxv16f32.bf16( + %0, + bfloat %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 0) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll new file mode 100644 index 0000000000000..d993e4e610d2c --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-bf.ll @@ -0,0 +1,519 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwsub_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; 
CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwsub.vv v10, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.nxv1bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwsub.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwsub_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwsub.vv v10, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.nxv2bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwsub.vv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwsub_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v9 +; CHECK-NEXT: vmv1r.v v11, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwsub.vv v8, v11, v10 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.nxv4bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwsub.vv v8, v10, v11, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwsub_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmv2r.v v12, v10 +; CHECK-NEXT: vmv2r.v v14, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwsub.vv v8, v14, v12 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.nxv8bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwsub.vv v8, v12, v14, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwsub_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmv4r.v v16, v12 +; CHECK-NEXT: vmv4r.v v20, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwsub.vv v8, v20, v16 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.nxv16bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub_mask_vv_nxv16f32_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f32_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwsub.vv v8, v16, v20, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwsub_vf_nxv1f32_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwsub.vf v9, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv1f32.nxv1bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub_mask_vf_nxv1f32_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.mask.nxv1f32.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwsub_vf_nxv2f32_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vfwsub_vf_nxv2f32_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwsub.vf v9, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv2f32.nxv2bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub_mask_vf_nxv2f32_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.mask.nxv2f32.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwsub_vf_nxv4f32_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmv1r.v v10, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwsub.vf v8, v10, fa0 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv4f32.nxv4bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub_mask_vf_nxv4f32_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v10, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.mask.nxv4f32.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwsub_vf_nxv8f32_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmv2r.v v12, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwsub.vf v8, v12, fa0 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv8f32.nxv8bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub_mask_vf_nxv8f32_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v12, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.mask.nxv8f32.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwsub_vf_nxv16f32_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; 
CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f32_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmv4r.v v16, v8 +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vfwsub.vf v8, v16, fa0 +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.nxv16f32.nxv16bf16.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub_mask_vf_nxv16f32_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f32_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwsub.vf v8, v16, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.mask.nxv16f32.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll new file mode 100644 index 0000000000000..11066d9487684 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-w-bf.ll @@ -0,0 +1,773 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f32_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwsub.wv v8, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f32_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwsub.wv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f32_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwsub.wv v8, v8, v9 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f32_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwsub.wv v8, v9, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f32_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfwsub.wv v8, v8, v10 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f32_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwsub.wv v8, v10, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f32_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfwsub.wv v8, v8, v12 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f32_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwsub.wv v8, v12, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16( + , + , + , + iXLen, iXLen); + +define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f32_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfwsub.wv v8, v8, v16 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv16f32.nxv16bf16( + poison, + %0, + %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16( + , + , + , + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f32_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl4re16.v v24, (a0) +; CHECK-NEXT: fsrmi a0, 0 +; CHECK-NEXT: vsetvli 
zero, a1, e16alt, m4, ta, mu +; CHECK-NEXT: vfwsub.wv v8, v16, v24, v0.t +; CHECK-NEXT: fsrm a0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16( + %0, + %1, + %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv1f32.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_nxv1f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv1f32.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_nxv1f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv2f32.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_nxv2f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv2f32.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_nxv2f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v9, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv4f32.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_nxv4f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv4f32.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_nxv4f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v10, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vfwsub.w.mask.nxv4f32.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv8f32.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_nxv8f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv8f32.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_nxv8f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v12, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv16f32.bf16( + , + , + bfloat, + iXLen, iXLen); + +define @intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_nxv16f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vfwsub.wf v8, v8, fa0 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv16f32.bf16( + poison, + %0, + bfloat %1, + iXLen 0, iXLen %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16( + , + , + bfloat, + , + iXLen, iXLen, iXLen); + +define @intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_nxv16f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v16, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen 0, iXLen %4, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv1f32_nxv1f32_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwsub.wv v8, v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.nxv1bf16( + %0, + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv2f32_nxv2f32_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwsub.wv v8, v8, v9, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.nxv2bf16( + %0, + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vfwsub.w_mask_wv_tie_nxv4f32_nxv4f32_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwsub.wv v8, v8, v10, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.nxv4bf16( + %0, + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv8f32_nxv8f32_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwsub.wv v8, v8, v12, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.nxv8bf16( + %0, + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16bf16( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_tie_nxv16f32_nxv16f32_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwsub.wv v8, v8, v16, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.nxv16bf16( + %0, + %0, + %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv1f32_nxv1f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.bf16( + %0, + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv2f32_nxv2f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.bf16( + %0, + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv4f32_nxv4f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.bf16( + %0, + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_tie_nxv8f32_nxv8f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.bf16( + %0, + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_bf16( %0, bfloat %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: 
intrinsic_vfwsub.w_mask_wf_tie_nxv16f32_nxv16f32_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vfwsub.wf v8, v8, fa0, v0.t +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.bf16( + %0, + %0, + bfloat %1, + %2, + iXLen 0, iXLen %3, iXLen 1) + + ret %a +} + +define @intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv1f32_nxv1f32_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vfwsub.wv v9, v9, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv1f32.nxv1bf16( + poison, + %1, + %0, + iXLen 0, iXLen %2) + + ret %a +} + +define @intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv2f32_nxv2f32_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vfwsub.wv v9, v9, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv2f32.nxv2bf16( + poison, + %1, + %0, + iXLen 0, iXLen %2) + + ret %a +} + +define @intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv4f32_nxv4f32_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vfwsub.wv v10, v10, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv2r.v v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv4f32.nxv4bf16( + poison, + %1, + %0, + iXLen 0, iXLen %2) + + ret %a +} + +define @intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vfwsub.w_wv_untie_nxv8f32_nxv8f32_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: fsrmi a1, 0 +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vfwsub.wv v12, v12, v8 +; CHECK-NEXT: fsrm a1 +; CHECK-NEXT: vmv4r.v v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vfwsub.w.nxv8f32.nxv8bf16( + poison, + %1, + %0, + iXLen 0, iXLen %2) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll new file mode 100644 index 0000000000000..9bd859b3452f2 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq-bf.ll @@ -0,0 +1,496 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vmfeq.nxv1bf16( + , + , + iXLen); + +define @intrinsic_vmfeq_vv_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_vv_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vmfeq.vv v0, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.nxv1bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfeq.mask.nxv1bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfeq_mask_vv_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { 
+; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmfeq.vv v0, v8, v9 +; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfeq.nxv1bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfeq.mask.nxv1bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfeq.nxv2bf16( + , + , + iXLen); + +define @intrinsic_vmfeq_vv_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_vv_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vmfeq.vv v0, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.nxv2bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfeq.mask.nxv2bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfeq_mask_vv_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmfeq.vv v0, v8, v9 +; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfeq.nxv2bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfeq.mask.nxv2bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfeq.nxv4bf16( + , + , + iXLen); + +define @intrinsic_vmfeq_vv_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_vv_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmfeq.vv v0, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.nxv4bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfeq.mask.nxv4bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfeq_mask_vv_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmfeq.vv v0, v8, v9 +; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t +; CHECK-NEXT: vmv.v.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfeq.nxv4bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfeq.mask.nxv4bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfeq.nxv8bf16( + , + , + iXLen); + +define @intrinsic_vmfeq_vv_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_vv_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmfeq.vv v0, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.nxv8bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfeq.mask.nxv8bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfeq_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 +; CHECK-NEXT: vmfeq.vv v0, v8, v10 +; CHECK-NEXT: vmfeq.vv v14, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v14 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfeq.nxv8bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfeq.mask.nxv8bf16( + %0, + 
%2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfeq.nxv16bf16( + , + , + iXLen); + +define @intrinsic_vmfeq_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_vv_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmfeq.vv v0, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.nxv16bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfeq.mask.nxv16bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfeq_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 +; CHECK-NEXT: vmfeq.vv v0, v8, v12 +; CHECK-NEXT: vmfeq.vv v20, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v20 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfeq.nxv16bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfeq.mask.nxv16bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfeq.nxv1bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfeq_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_vf_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.nxv1bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfeq.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfeq_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfeq.nxv2bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfeq_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_vf_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.nxv2bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfeq.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfeq_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfeq.nxv4bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfeq_vf_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_vf_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.nxv4bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret 
%a +} + +declare @llvm.riscv.vmfeq.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfeq_mask_vf_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfeq.nxv8bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfeq_vf_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_vf_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.nxv8bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfeq.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfeq_mask_vf_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmfeq.vf v11, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfeq.nxv16bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfeq_vf_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_vf_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmfeq.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.nxv16bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfeq.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfeq_mask_vf_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vmv1r.v v13, v0 +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmfeq.vf v13, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v13 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfeq.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll new file mode 100644 index 0000000000000..73946dc1a744c --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-bf.ll @@ -0,0 +1,496 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vmfge.nxv1bf16( + , + , + iXLen); + +define @intrinsic_vmfge_vv_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: ret 
+entry: + %a = call @llvm.riscv.vmfge.nxv1bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv1bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfge_mask_vv_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfge.nxv1bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfge.mask.nxv1bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv2bf16( + , + , + iXLen); + +define @intrinsic_vmfge_vv_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.nxv2bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv2bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfge_mask_vv_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfge.nxv2bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfge.mask.nxv2bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv4bf16( + , + , + iXLen); + +define @intrinsic_vmfge_vv_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.nxv4bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv4bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfge_mask_vv_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t +; CHECK-NEXT: vmv.v.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfge.nxv4bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfge.mask.nxv4bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv8bf16( + , + , + iXLen); + +define @intrinsic_vmfge_vv_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.nxv8bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv8bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfge_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 +; 
CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: vmfle.vv v14, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v14 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfge.nxv8bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfge.mask.nxv8bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv16bf16( + , + , + iXLen); + +define @intrinsic_vmfge_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.nxv16bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv16bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfge_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 +; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: vmfle.vv v20, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v20 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfge.nxv16bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfge.mask.nxv16bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv1bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfge_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vf_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.nxv1bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfge_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv2bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfge_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vf_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vmfge.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.nxv2bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfge_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfge.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv4bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfge_vf_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: 
intrinsic_vmfge_vf_nxv4bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT:    vmfge.vf v0, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4bf16.bf16(
+    <vscale x 4 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16(
+  <vscale x 4 x i1>,
+  <vscale x 4 x bfloat>,
+  bfloat,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4bf16_bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT:    vmv1r.v v10, v0
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmfge.vf v10, v8, fa0, v0.t
+; CHECK-NEXT:    vmv.v.v v0, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4bf16.bf16(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x bfloat> %1,
+    bfloat %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.bf16(
+  <vscale x 8 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfge_vf_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv8bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT:    vmfge.vf v0, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8bf16.bf16(
+    <vscale x 8 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16(
+  <vscale x 8 x i1>,
+  <vscale x 8 x bfloat>,
+  bfloat,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8bf16_bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT:    vmv1r.v v11, v0
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    vmfge.vf v11, v8, fa0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v11
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8bf16.bf16(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x bfloat> %1,
+    bfloat %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.bf16(
+  <vscale x 16 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfge_vf_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_vf_nxv16bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT:    vmfge.vf v0, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16bf16.bf16(
+    <vscale x 16 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16(
+  <vscale x 16 x i1>,
+  <vscale x 16 x bfloat>,
+  bfloat,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfge_mask_vf_nxv16bf16_bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT:    vmv1r.v v13, v0
+; CHECK-NEXT:    vmv1r.v v0, v12
+; CHECK-NEXT:    vmfge.vf v13, v8, fa0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v13
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16bf16.bf16(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x bfloat> %1,
+    bfloat %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4)
+
+  ret <vscale x 16 x i1> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll
new file mode 100644
index 0000000000000..fac324ca5c125
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-bf.ll
@@ -0,0 +1,496 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  iXLen);
+
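+; Note on the expected assembly in this file: the vector ISA provides vmfgt
+; only in the vector-scalar form (vmfgt.vf); there is no vmfgt.vv encoding.
+; For the register-register tests the backend therefore emits vmflt.vv with
+; the source operands swapped (a > b is computed as b < a), which is why the
+; CHECK lines below match vmflt.vv rather than vmfgt.vv. A minimal sketch of
+; the lowering for the first test (illustrative only, not an autogenerated
+; check; register assignments follow the test below):
+;
+;   vsetvli zero, a0, e16alt, mf4, ta, ma
+;   vmflt.vv v0, v9, v8        ; v0 = (v8 > v9), operands swapped
+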
+define <vscale x 1 x i1> @intrinsic_vmfgt_vv_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT:    vmflt.vv v0, v9, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16(
+  <vscale x 1 x i1>,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1bf16_nxv1bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT:    vmv1r.v v11, v0
+; CHECK-NEXT:    vmflt.vv v0, v9, v8
+; CHECK-NEXT:    vmflt.vv v11, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v11
+; CHECK-NEXT:    ret
+entry:
+  %mask = call <vscale x 1 x i1> @llvm.riscv.vmfgt.nxv1bf16(
+    <vscale x 1 x bfloat> %1,
+    <vscale x 1 x bfloat> %2,
+    iXLen %4)
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1bf16(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x bfloat> %2,
+    <vscale x 1 x bfloat> %3,
+    <vscale x 1 x i1> %mask,
+    iXLen %4)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_vv_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT:    vmflt.vv v0, v9, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16(
+  <vscale x 2 x i1>,
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2bf16_nxv2bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT:    vmv1r.v v11, v0
+; CHECK-NEXT:    vmflt.vv v0, v9, v8
+; CHECK-NEXT:    vmflt.vv v11, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v11
+; CHECK-NEXT:    ret
+entry:
+  %mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2bf16(
+    <vscale x 2 x bfloat> %1,
+    <vscale x 2 x bfloat> %2,
+    iXLen %4)
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2bf16(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x bfloat> %2,
+    <vscale x 2 x bfloat> %3,
+    <vscale x 2 x i1> %mask,
+    iXLen %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_vv_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT:    vmflt.vv v0, v9, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x bfloat> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16(
+  <vscale x 4 x i1>,
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4bf16_nxv4bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT:    vmv1r.v v11, v0
+; CHECK-NEXT:    vmflt.vv v0, v9, v8
+; CHECK-NEXT:    vmflt.vv v11, v10, v9, v0.t
+; CHECK-NEXT:    vmv.v.v v0, v11
+; CHECK-NEXT:    ret
+entry:
+  %mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4bf16(
+    <vscale x 4 x bfloat> %1,
+    <vscale x 4 x bfloat> %2,
+    iXLen %4)
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4bf16(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x bfloat> %2,
+    <vscale x 4 x bfloat> %3,
+    <vscale x 4 x i1> %mask,
+    iXLen %4)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfgt_vv_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfgt_vv_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT:    vmflt.vv v0, v10, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x bfloat> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16(
+  <vscale x 8 x i1>,
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x i1>,
+  iXLen);
+
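+; The masked tests below all follow the same idiom: an unmasked vmfgt first
+; materializes %mask, which then drives the masked intrinsic, so the expected
+; assembly contains two swapped-operand vmflt.vv instructions plus vmv1r.v
+; copies to move masks in and out of v0. A sketch of the IR shape with the
+; scalable vector types spelled out (illustrative only):
+;
+;   %mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8bf16(
+;       <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %4)
+;   %a = call <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8bf16(
+;       <vscale x 8 x i1> %0, <vscale x 8 x bfloat> %2,
+;       <vscale x 8 x bfloat> %3, <vscale x 8 x i1> %mask, iXLen %4)
+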
+define @intrinsic_vmfgt_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 +; CHECK-NEXT: vmflt.vv v0, v10, v8 +; CHECK-NEXT: vmflt.vv v14, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v14 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfgt.nxv8bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfgt.mask.nxv8bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv16bf16( + , + , + iXLen); + +define @intrinsic_vmfgt_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.nxv16bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv16bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfgt_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 +; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: vmflt.vv v20, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v20 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfgt.nxv16bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfgt.mask.nxv16bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv1bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfgt_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vf_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.nxv1bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfgt_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv2bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfgt_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vf_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.nxv2bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfgt_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call 
@llvm.riscv.vmfgt.mask.nxv2bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv4bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfgt_vf_nxv4bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vf_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.nxv4bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv4bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfgt_mask_vf_nxv4bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.mask.nxv4bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv8bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfgt_vf_nxv8bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vf_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.nxv8bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv8bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfgt_mask_vf_nxv8bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmfgt.vf v11, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.mask.nxv8bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv16bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfgt_vf_nxv16bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vf_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmfgt.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.nxv16bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv16bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfgt_mask_vf_nxv16bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vmv1r.v v13, v0 +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmfgt.vf v13, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v13 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfgt.mask.nxv16bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll new file mode 100644 index 0000000000000..8356b7bbd3ff7 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmfle-bf.ll @@ -0,0 +1,496 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \ +; RUN: 
-verify-machineinstrs -target-abi=ilp32d | FileCheck %s +; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \ +; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s + +declare @llvm.riscv.vmfle.nxv1bf16( + , + , + iXLen); + +define @intrinsic_vmfle_vv_nxv1bf16_nxv1bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfle_vv_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vmfle.vv v0, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.nxv1bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfle.mask.nxv1bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfle_mask_vv_nxv1bf16_nxv1bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1bf16_nxv1bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmfle.vv v0, v8, v9 +; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfle.nxv1bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfle.mask.nxv1bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfle.nxv2bf16( + , + , + iXLen); + +define @intrinsic_vmfle_vv_nxv2bf16_nxv2bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfle_vv_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vmfle.vv v0, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.nxv2bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfle.mask.nxv2bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfle_mask_vv_nxv2bf16_nxv2bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2bf16_nxv2bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmfle.vv v0, v8, v9 +; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfle.nxv2bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfle.mask.nxv2bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfle.nxv4bf16( + , + , + iXLen); + +define @intrinsic_vmfle_vv_nxv4bf16_nxv4bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfle_vv_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; CHECK-NEXT: vmfle.vv v0, v8, v9 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.nxv4bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfle.mask.nxv4bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfle_mask_vv_nxv4bf16_nxv4bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4bf16_nxv4bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m1, ta, mu +; CHECK-NEXT: vmv1r.v v11, v0 +; CHECK-NEXT: vmfle.vv v0, v8, v9 +; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t +; CHECK-NEXT: vmv.v.v v0, v11 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfle.nxv4bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfle.mask.nxv4bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfle.nxv8bf16( + , + , + iXLen); + +define @intrinsic_vmfle_vv_nxv8bf16_nxv8bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfle_vv_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; CHECK-NEXT: vmfle.vv v0, v8, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.nxv8bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfle.mask.nxv8bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfle_mask_vv_nxv8bf16_nxv8bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8bf16_nxv8bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m2, ta, mu +; CHECK-NEXT: vmv1r.v v14, v0 +; CHECK-NEXT: vmfle.vv v0, v8, v10 +; CHECK-NEXT: vmfle.vv v14, v10, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v14 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfle.nxv8bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfle.mask.nxv8bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfle.nxv16bf16( + , + , + iXLen); + +define @intrinsic_vmfle_vv_nxv16bf16_nxv16bf16( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfle_vv_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; CHECK-NEXT: vmfle.vv v0, v8, v12 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.nxv16bf16( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfle.mask.nxv16bf16( + , + , + , + , + iXLen); + +define @intrinsic_vmfle_mask_vv_nxv16bf16_nxv16bf16( %0, %1, %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16bf16_nxv16bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, m4, ta, mu +; CHECK-NEXT: vmv1r.v v20, v0 +; CHECK-NEXT: vmfle.vv v0, v8, v12 +; CHECK-NEXT: vmfle.vv v20, v12, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v20 +; CHECK-NEXT: ret +entry: + %mask = call @llvm.riscv.vmfle.nxv16bf16( + %1, + %2, + iXLen %4) + %a = call @llvm.riscv.vmfle.mask.nxv16bf16( + %0, + %2, + %3, + %mask, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfle.nxv1bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfle_vf_nxv1bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfle_vf_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, ma +; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.nxv1bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfle.mask.nxv1bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfle_mask_vf_nxv1bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf4, ta, mu +; CHECK-NEXT: vmv1r.v v10, v0 +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.mask.nxv1bf16.bf16( + %0, + %1, + bfloat %2, + %3, + iXLen %4) + + ret %a +} + +declare @llvm.riscv.vmfle.nxv2bf16.bf16( + , + bfloat, + iXLen); + +define @intrinsic_vmfle_vf_nxv2bf16_bf16( %0, bfloat %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfle_vf_nxv2bf16_bf16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli zero, a0, e16alt, mf2, ta, ma +; CHECK-NEXT: vmfle.vf v0, v8, fa0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.vmfle.nxv2bf16.bf16( + %0, + bfloat %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.vmfle.mask.nxv2bf16.bf16( + , + , + bfloat, + , + iXLen); + +define @intrinsic_vmfle_mask_vf_nxv2bf16_bf16( %0, %1, bfloat %2, %3, iXLen %4) nounwind { +; CHECK-LABEL: 
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT:    vmv1r.v v10, v0
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmfle.vf v10, v8, fa0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2bf16.bf16(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x bfloat> %1,
+    bfloat %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.bf16(
+  <vscale x 4 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_vf_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv4bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT:    vmfle.vf v0, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4bf16.bf16(
+    <vscale x 4 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16(
+  <vscale x 4 x i1>,
+  <vscale x 4 x bfloat>,
+  bfloat,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4bf16_bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT:    vmv1r.v v10, v0
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmfle.vf v10, v8, fa0, v0.t
+; CHECK-NEXT:    vmv.v.v v0, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4bf16.bf16(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x bfloat> %1,
+    bfloat %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.bf16(
+  <vscale x 8 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_vf_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv8bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT:    vmfle.vf v0, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8bf16.bf16(
+    <vscale x 8 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16(
+  <vscale x 8 x i1>,
+  <vscale x 8 x bfloat>,
+  bfloat,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8bf16_bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT:    vmv1r.v v11, v0
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    vmfle.vf v11, v8, fa0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v11
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8bf16.bf16(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x bfloat> %1,
+    bfloat %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.bf16(
+  <vscale x 16 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfle_vf_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_vf_nxv16bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT:    vmfle.vf v0, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16bf16.bf16(
+    <vscale x 16 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16(
+  <vscale x 16 x i1>,
+  <vscale x 16 x bfloat>,
+  bfloat,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfle_mask_vf_nxv16bf16_bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT:    vmv1r.v v13, v0
+; CHECK-NEXT:    vmv1r.v v0, v12
+; CHECK-NEXT:    vmfle.vf v13, v8, fa0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v13
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16bf16.bf16(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x bfloat> %1,
+    bfloat %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4)
+
+  ret <vscale x 16 x i1> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll
new file mode 100644
index 0000000000000..2e1bcc5e87bfc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmflt-bf.ll
@@ -0,0 +1,496 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_vv_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT:    vmflt.vv v0, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16(
+  <vscale x 1 x i1>,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1bf16_nxv1bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT:    vmv1r.v v11, v0
+; CHECK-NEXT:    vmflt.vv v0, v8, v9
+; CHECK-NEXT:    vmflt.vv v11, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v11
+; CHECK-NEXT:    ret
+entry:
+  %mask = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16(
+    <vscale x 1 x bfloat> %1,
+    <vscale x 1 x bfloat> %2,
+    iXLen %4)
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x bfloat> %2,
+    <vscale x 1 x bfloat> %3,
+    <vscale x 1 x i1> %mask,
+    iXLen %4)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_vv_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT:    vmflt.vv v0, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16(
+  <vscale x 2 x i1>,
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2bf16_nxv2bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT:    vmv1r.v v11, v0
+; CHECK-NEXT:    vmflt.vv v0, v8, v9
+; CHECK-NEXT:    vmflt.vv v11, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v11
+; CHECK-NEXT:    ret
+entry:
+  %mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16(
+    <vscale x 2 x bfloat> %1,
+    <vscale x 2 x bfloat> %2,
+    iXLen %4)
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x bfloat> %2,
+    <vscale x 2 x bfloat> %3,
+    <vscale x 2 x i1> %mask,
+    iXLen %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_vv_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT:    vmflt.vv v0, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x bfloat> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16(
+  <vscale x 4 x i1>,
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4bf16_nxv4bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT:    vmv1r.v v11, v0
+; CHECK-NEXT:    vmflt.vv v0, v8, v9
+; CHECK-NEXT:    vmflt.vv v11, v9, v10, v0.t
+; CHECK-NEXT:    vmv.v.v v0, v11
+; CHECK-NEXT:    ret
+entry:
+  %mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16(
+    <vscale x 4 x bfloat> %1,
+    <vscale x 4 x bfloat> %2,
+    iXLen %4)
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x bfloat> %2,
+    <vscale x 4 x bfloat> %3,
+    <vscale x 4 x i1> %mask,
+    iXLen %4)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_vv_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT:    vmflt.vv v0, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x bfloat> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16(
+  <vscale x 8 x i1>,
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8bf16_nxv8bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT:    vmv1r.v v14, v0
+; CHECK-NEXT:    vmflt.vv v0, v8, v10
+; CHECK-NEXT:    vmflt.vv v14, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v14
+; CHECK-NEXT:    ret
+entry:
+  %mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16(
+    <vscale x 8 x bfloat> %1,
+    <vscale x 8 x bfloat> %2,
+    iXLen %4)
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x bfloat> %2,
+    <vscale x 8 x bfloat> %3,
+    <vscale x 8 x i1> %mask,
+    iXLen %4)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmflt_vv_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vv_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT:    vmflt.vv v0, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x bfloat> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16(
+  <vscale x 16 x i1>,
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmflt_mask_vv_nxv16bf16_nxv16bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT:    vmv1r.v v20, v0
+; CHECK-NEXT:    vmflt.vv v0, v8, v12
+; CHECK-NEXT:    vmflt.vv v20, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v20
+; CHECK-NEXT:    ret
+entry:
+  %mask = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16(
+    <vscale x 16 x bfloat> %1,
+    <vscale x 16 x bfloat> %2,
+    iXLen %4)
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x bfloat> %2,
+    <vscale x 16 x bfloat> %3,
+    <vscale x 16 x i1> %mask,
+    iXLen %4)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.bf16(
+  <vscale x 1 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_vf_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv1bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT:    vmflt.vf v0, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.nxv1bf16.bf16(
+    <vscale x 1 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16(
+  <vscale x 1 x i1>,
+  <vscale x 1 x bfloat>,
+  bfloat,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1bf16_bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT:    vmv1r.v v10, v0
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmflt.vf v10, v8, fa0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1bf16.bf16(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x bfloat> %1,
+    bfloat %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.bf16(
+  <vscale x 2 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_vf_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv2bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT:    vmflt.vf v0, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2bf16.bf16(
+    <vscale x 2 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16(
+  <vscale x 2 x i1>,
+  <vscale x 2 x bfloat>,
+  bfloat,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2bf16_bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT:    vmv1r.v v10, v0
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmflt.vf v10, v8, fa0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2bf16.bf16(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x bfloat> %1,
+    bfloat %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.bf16(
+  <vscale x 4 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_vf_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv4bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT:    vmflt.vf v0, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4bf16.bf16(
+    <vscale x 4 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16(
+  <vscale x 4 x i1>,
+  <vscale x 4 x bfloat>,
+  bfloat,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4bf16_bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT:    vmv1r.v v10, v0
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmflt.vf v10, v8, fa0, v0.t
+; CHECK-NEXT:    vmv.v.v v0, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4bf16.bf16(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x bfloat> %1,
+    bfloat %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.bf16(
+  <vscale x 8 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_vf_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv8bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT:    vmflt.vf v0, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8bf16.bf16(
+    <vscale x 8 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16(
+  <vscale x 8 x i1>,
+  <vscale x 8 x bfloat>,
+  bfloat,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8bf16_bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT:    vmv1r.v v11, v0
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    vmflt.vf v11, v8, fa0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v11
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8bf16.bf16(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x bfloat> %1,
+    bfloat %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.bf16(
+  <vscale x 16 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmflt_vf_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_vf_nxv16bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT:    vmflt.vf v0, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16bf16.bf16(
+    <vscale x 16 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16(
+  <vscale x 16 x i1>,
+  <vscale x 16 x bfloat>,
+  bfloat,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmflt_mask_vf_nxv16bf16_bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv16bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT:    vmv1r.v v13, v0
+; CHECK-NEXT:    vmv1r.v v0, v12
+; CHECK-NEXT:    vmflt.vf v13, v8, fa0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v13
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16bf16.bf16(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x bfloat> %1,
+    bfloat %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4)
+
+  ret <vscale x 16 x i1> %a
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll
new file mode 100644
index 0000000000000..283ffc500fdde
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfne-bf.ll
@@ -0,0 +1,496 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+experimental-zvfbfa \
+; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16(
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_vv_nxv1bf16_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT:    vmfne.vv v0, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16(
+  <vscale x 1 x i1>,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x bfloat>,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1bf16_nxv1bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x bfloat> %2, <vscale x 1 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1bf16_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT:    vmv1r.v v11, v0
+; CHECK-NEXT:    vmfne.vv v0, v8, v9
+; CHECK-NEXT:    vmfne.vv v11, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v11
+; CHECK-NEXT:    ret
+entry:
+  %mask = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16(
+    <vscale x 1 x bfloat> %1,
+    <vscale x 1 x bfloat> %2,
+    iXLen %4)
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x bfloat> %2,
+    <vscale x 1 x bfloat> %3,
+    <vscale x 1 x i1> %mask,
+    iXLen %4)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16(
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_vv_nxv2bf16_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT:    vmfne.vv v0, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16(
+  <vscale x 2 x i1>,
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x bfloat>,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2bf16_nxv2bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x bfloat> %2, <vscale x 2 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2bf16_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT:    vmv1r.v v11, v0
+; CHECK-NEXT:    vmfne.vv v0, v8, v9
+; CHECK-NEXT:    vmfne.vv v11, v9, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v11
+; CHECK-NEXT:    ret
+entry:
+  %mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16(
+    <vscale x 2 x bfloat> %1,
+    <vscale x 2 x bfloat> %2,
+    iXLen %4)
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x bfloat> %2,
+    <vscale x 2 x bfloat> %3,
+    <vscale x 2 x i1> %mask,
+    iXLen %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16(
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_vv_nxv4bf16_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT:    vmfne.vv v0, v8, v9
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x bfloat> %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16(
+  <vscale x 4 x i1>,
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x bfloat>,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4bf16_nxv4bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x bfloat> %2, <vscale x 4 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4bf16_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT:    vmv1r.v v11, v0
+; CHECK-NEXT:    vmfne.vv v0, v8, v9
+; CHECK-NEXT:    vmfne.vv v11, v9, v10, v0.t
+; CHECK-NEXT:    vmv.v.v v0, v11
+; CHECK-NEXT:    ret
+entry:
+  %mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16(
+    <vscale x 4 x bfloat> %1,
+    <vscale x 4 x bfloat> %2,
+    iXLen %4)
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x bfloat> %2,
+    <vscale x 4 x bfloat> %3,
+    <vscale x 4 x i1> %mask,
+    iXLen %4)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16(
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_vv_nxv8bf16_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT:    vmfne.vv v0, v8, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x bfloat> %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16(
+  <vscale x 8 x i1>,
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x bfloat>,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8bf16_nxv8bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x bfloat> %2, <vscale x 8 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8bf16_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT:    vmv1r.v v14, v0
+; CHECK-NEXT:    vmfne.vv v0, v8, v10
+; CHECK-NEXT:    vmfne.vv v14, v10, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v14
+; CHECK-NEXT:    ret
+entry:
+  %mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16(
+    <vscale x 8 x bfloat> %1,
+    <vscale x 8 x bfloat> %2,
+    iXLen %4)
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x bfloat> %2,
+    <vscale x 8 x bfloat> %3,
+    <vscale x 8 x i1> %mask,
+    iXLen %4)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16(
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfne_vv_nxv16bf16_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vv_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT:    vmfne.vv v0, v8, v12
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x bfloat> %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16(
+  <vscale x 16 x i1>,
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x bfloat>,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfne_mask_vv_nxv16bf16_nxv16bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x bfloat> %2, <vscale x 16 x bfloat> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16bf16_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT:    vmv1r.v v20, v0
+; CHECK-NEXT:    vmfne.vv v0, v8, v12
+; CHECK-NEXT:    vmfne.vv v20, v12, v16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v20
+; CHECK-NEXT:    ret
+entry:
+  %mask = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16(
+    <vscale x 16 x bfloat> %1,
+    <vscale x 16 x bfloat> %2,
+    iXLen %4)
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x bfloat> %2,
+    <vscale x 16 x bfloat> %3,
+    <vscale x 16 x i1> %mask,
+    iXLen %4)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.bf16(
+  <vscale x 1 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_vf_nxv1bf16_bf16(<vscale x 1 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv1bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT:    vmfne.vf v0, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.nxv1bf16.bf16(
+    <vscale x 1 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16(
+  <vscale x 1 x i1>,
+  <vscale x 1 x bfloat>,
+  bfloat,
+  <vscale x 1 x i1>,
+  iXLen);
+
+define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1bf16_bf16(<vscale x 1 x i1> %0, <vscale x 1 x bfloat> %1, bfloat %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, mu
+; CHECK-NEXT:    vmv1r.v v10, v0
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmfne.vf v10, v8, fa0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1bf16.bf16(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x bfloat> %1,
+    bfloat %2,
+    <vscale x 1 x i1> %3,
+    iXLen %4)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.bf16(
+  <vscale x 2 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_vf_nxv2bf16_bf16(<vscale x 2 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv2bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT:    vmfne.vf v0, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2bf16.bf16(
+    <vscale x 2 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16(
+  <vscale x 2 x i1>,
+  <vscale x 2 x bfloat>,
+  bfloat,
+  <vscale x 2 x i1>,
+  iXLen);
+
+define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2bf16_bf16(<vscale x 2 x i1> %0, <vscale x 2 x bfloat> %1, bfloat %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, mu
+; CHECK-NEXT:    vmv1r.v v10, v0
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmfne.vf v10, v8, fa0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2bf16.bf16(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x bfloat> %1,
+    bfloat %2,
+    <vscale x 2 x i1> %3,
+    iXLen %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.bf16(
+  <vscale x 4 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_vf_nxv4bf16_bf16(<vscale x 4 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv4bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT:    vmfne.vf v0, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4bf16.bf16(
+    <vscale x 4 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16(
+  <vscale x 4 x i1>,
+  <vscale x 4 x bfloat>,
+  bfloat,
+  <vscale x 4 x i1>,
+  iXLen);
+
+define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4bf16_bf16(<vscale x 4 x i1> %0, <vscale x 4 x bfloat> %1, bfloat %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, mu
+; CHECK-NEXT:    vmv1r.v v10, v0
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmfne.vf v10, v8, fa0, v0.t
+; CHECK-NEXT:    vmv.v.v v0, v10
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4bf16.bf16(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x bfloat> %1,
+    bfloat %2,
+    <vscale x 4 x i1> %3,
+    iXLen %4)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.bf16(
+  <vscale x 8 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_vf_nxv8bf16_bf16(<vscale x 8 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv8bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT:    vmfne.vf v0, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8bf16.bf16(
+    <vscale x 8 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16(
+  <vscale x 8 x i1>,
+  <vscale x 8 x bfloat>,
+  bfloat,
+  <vscale x 8 x i1>,
+  iXLen);
+
+define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8bf16_bf16(<vscale x 8 x i1> %0, <vscale x 8 x bfloat> %1, bfloat %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, ta, mu
+; CHECK-NEXT:    vmv1r.v v11, v0
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    vmfne.vf v11, v8, fa0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v11
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8bf16.bf16(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x bfloat> %1,
+    bfloat %2,
+    <vscale x 8 x i1> %3,
+    iXLen %4)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.bf16(
+  <vscale x 16 x bfloat>,
+  bfloat,
+  iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfne_vf_nxv16bf16_bf16(<vscale x 16 x bfloat> %0, bfloat %1, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_vf_nxv16bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT:    vmfne.vf v0, v8, fa0
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16bf16.bf16(
+    <vscale x 16 x bfloat> %0,
+    bfloat %1,
+    iXLen %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16(
+  <vscale x 16 x i1>,
+  <vscale x 16 x bfloat>,
+  bfloat,
+  <vscale x 16 x i1>,
+  iXLen);
+
+define <vscale x 16 x i1> @intrinsic_vmfne_mask_vf_nxv16bf16_bf16(<vscale x 16 x i1> %0, <vscale x 16 x bfloat> %1, bfloat %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
+; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16bf16_bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, ta, mu
+; CHECK-NEXT:    vmv1r.v v13, v0
+; CHECK-NEXT:    vmv1r.v v0, v12
+; CHECK-NEXT:    vmfne.vf v13, v8, fa0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v13
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16bf16.bf16(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x bfloat> %1,
+    bfloat %2,
+    <vscale x 16 x i1> %3,
+    iXLen %4)
+
+  ret <vscale x 16 x i1> %a
+}
+