diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst index f48767fdef4469..81ea54c0a93831 100644 --- a/llvm/docs/LangRef.rst +++ b/llvm/docs/LangRef.rst @@ -16692,6 +16692,8 @@ The canonicalization operation may be optimized away if: - The result is consumed only by (or fused with) other floating-point operations. That is, the bits of the floating-point value are not examined. +.. _int_fmuladd: + '``llvm.fmuladd.*``' Intrinsic ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -19389,6 +19391,57 @@ Examples: %also.r = select <4 x i1> %mask, <4 x float> %t, <4 x float> poison +.. _int_vp_fmuladd: + +'``llvm.vp.fmuladd.*``' Intrinsics +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Syntax: +""""""" +This is an overloaded intrinsic. + +:: + + declare <16 x float> @llvm.vp.fmuladd.v16f32 (<16 x float> <left_op>, <16 x float> <middle_op>, <16 x float> <right_op>, <16 x i1> <mask>, i32 <vector_length>) + declare <vscale x 4 x float> @llvm.vp.fmuladd.nxv4f32 (<vscale x 4 x float> <left_op>, <vscale x 4 x float> <middle_op>, <vscale x 4 x float> <right_op>, <vscale x 4 x i1> <mask>, i32 <vector_length>) + declare <256 x double> @llvm.vp.fmuladd.v256f64 (<256 x double> <left_op>, <256 x double> <middle_op>, <256 x double> <right_op>, <256 x i1> <mask>, i32 <vector_length>) + +Overview: +""""""""" + +Predicated floating-point multiply-add of two vectors of floating-point values +that can be fused if the code generator determines that (a) the target instruction +set has support for a fused operation, and (b) that the fused operation is more +efficient than the equivalent, separate pair of mul and add instructions. + +Arguments: +"""""""""" + +The first three operands and the result have the same vector of floating-point +type. The fourth operand is the vector mask and has the same number of elements +as the result vector type. The fifth operand is the explicit vector length of +the operation. + +Semantics: +"""""""""" + +The '``llvm.vp.fmuladd``' intrinsic performs floating-point multiply-add (:ref:`llvm.fmuladd <int_fmuladd>`) +of the first, second, and third vector operand on each enabled lane. The result +on disabled lanes is a :ref:`poison value <poisonvalues>`. The operation is +performed in the default floating-point environment. + +Examples: +""""""""" + +.. 
code-block:: llvm + + %r = call <4 x float> @llvm.vp.fmuladd.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c, <4 x i1> %mask, i32 %evl) + ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r + + %t = call <4 x float> @llvm.fmuladd(<4 x float> %a, <4 x float> %b, <4 x float> %c) + %also.r = select <4 x i1> %mask, <4 x float> %t, <4 x float> poison + + .. _int_vp_reduce_add: '``llvm.vp.reduce.add.*``' Intrinsics diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td index 6437915aba4ca7..fa65d349ad4d60 100644 --- a/llvm/include/llvm/IR/Intrinsics.td +++ b/llvm/include/llvm/IR/Intrinsics.td @@ -1575,6 +1575,12 @@ let IntrProperties = [IntrNoMem, IntrNoSync, IntrWillReturn] in { LLVMMatchType<0>, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_i32_ty]>; + def int_vp_fmuladd : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ], + [ LLVMMatchType<0>, + LLVMMatchType<0>, + LLVMMatchType<0>, + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, + llvm_i32_ty]>; def int_vp_minnum : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ], [ LLVMMatchType<0>, LLVMMatchType<0>, diff --git a/llvm/include/llvm/IR/VPIntrinsics.def b/llvm/include/llvm/IR/VPIntrinsics.def index 4e6fba139ea91e..a5b5b2aa225d0a 100644 --- a/llvm/include/llvm/IR/VPIntrinsics.def +++ b/llvm/include/llvm/IR/VPIntrinsics.def @@ -248,6 +248,11 @@ BEGIN_REGISTER_VP(vp_fma, 3, 4, VP_FMA, -1) VP_PROPERTY_CONSTRAINEDFP(1, 1, experimental_constrained_fma) END_REGISTER_VP(vp_fma, VP_FMA) +// llvm.vp.fmuladd(x,y,z,mask,vlen) +BEGIN_REGISTER_VP(vp_fmuladd, 3, 4, VP_FMULADD, -1) +VP_PROPERTY_CONSTRAINEDFP(1, 1, experimental_constrained_fmuladd) +END_REGISTER_VP(vp_fmuladd, VP_FMULADD) + // llvm.vp.minnum(x, y, mask,vlen) BEGIN_REGISTER_VP(vp_minnum, 2, 3, VP_FMINNUM, -1) END_REGISTER_VP(vp_minnum, VP_FMINNUM) diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 6b85e4d5048b06..a5939ffec06faf 100644 --- 
a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -7699,6 +7699,25 @@ void SelectionDAGBuilder::visitVectorPredicationIntrinsic( case ISD::EXPERIMENTAL_VP_STRIDED_STORE: visitVPStridedStore(VPIntrin, OpValues); break; + case ISD::VP_FMULADD: { + assert(OpValues.size() == 5 && "Unexpected number of operands"); + SDNodeFlags SDFlags; + if (auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin)) + SDFlags.copyFMF(*FPMO); + if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && + TLI.isFMAFasterThanFMulAndFAdd(DAG.getMachineFunction(), ValueVTs[0])) { + setValue(&VPIntrin, DAG.getNode(ISD::VP_FMA, DL, VTs, OpValues, SDFlags)); + } else { + SDValue Mul = DAG.getNode( + ISD::VP_FMUL, DL, VTs, + {OpValues[0], OpValues[1], OpValues[3], OpValues[4]}, SDFlags); + SDValue Add = + DAG.getNode(ISD::VP_FADD, DL, VTs, + {Mul, OpValues[2], OpValues[3], OpValues[4]}, SDFlags); + setValue(&VPIntrin, Add); + } + break; + } } } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll new file mode 100644 index 00000000000000..b080a2610d063d --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll @@ -0,0 +1,838 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=ilp32d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=lp64d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare <2 x half> @llvm.vp.fmuladd.v2f16(<2 x half>, <2 x half>, <2 x half>, <2 x i1>, i32) + +define <2 x half> @vfma_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x half> %c, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: 
vfmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v = call <2 x half> @llvm.vp.fmuladd.v2f16(<2 x half> %va, <2 x half> %b, <2 x half> %c, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfma_vv_v2f16_unmasked(<2 x half> %va, <2 x half> %b, <2 x half> %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> poison, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fmuladd.v2f16(<2 x half> %va, <2 x half> %b, <2 x half> %c, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfma_vf_v2f16(<2 x half> %va, half %b, <2 x half> %vc, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> poison, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fmuladd.v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc, <2 x i1> %m, i32 %evl) + ret <2 x half> %v +} + +define <2 x half> @vfma_vf_v2f16_unmasked(<2 x half> %va, half %b, <2 x half> %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x half> poison, half %b, i32 0 + %vb = shufflevector <2 x half> %elt.head, <2 x half> poison, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> poison, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x half> @llvm.vp.fmuladd.v2f16(<2 x half> %va, <2 x half> %vb, <2 x half> %vc, <2 x i1> %m, i32 
%evl) + ret <2 x half> %v +} + +declare <4 x half> @llvm.vp.fmuladd.v4f16(<4 x half>, <4 x half>, <4 x half>, <4 x i1>, i32) + +define <4 x half> @vfma_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v = call <4 x half> @llvm.vp.fmuladd.v4f16(<4 x half> %va, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfma_vv_v4f16_unmasked(<4 x half> %va, <4 x half> %b, <4 x half> %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> poison, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fmuladd.v4f16(<4 x half> %va, <4 x half> %b, <4 x half> %c, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfma_vf_v4f16(<4 x half> %va, half %b, <4 x half> %vc, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> poison, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fmuladd.v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +define <4 x half> @vfma_vf_v4f16_unmasked(<4 x half> %va, half %b, <4 x half> %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x half> 
poison, half %b, i32 0 + %vb = shufflevector <4 x half> %elt.head, <4 x half> poison, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> poison, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x half> @llvm.vp.fmuladd.v4f16(<4 x half> %va, <4 x half> %vb, <4 x half> %vc, <4 x i1> %m, i32 %evl) + ret <4 x half> %v +} + +declare <8 x half> @llvm.vp.fmuladd.v8f16(<8 x half>, <8 x half>, <8 x half>, <8 x i1>, i32) + +define <8 x half> @vfma_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %v = call <8 x half> @llvm.vp.fmuladd.v8f16(<8 x half> %va, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfma_vv_v8f16_unmasked(<8 x half> %va, <8 x half> %b, <8 x half> %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> poison, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fmuladd.v8f16(<8 x half> %va, <8 x half> %b, <8 x half> %c, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfma_vf_v8f16(<8 x half> %va, half %b, <8 x half> %vc, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> poison, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fmuladd.v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc, <8 x 
i1> %m, i32 %evl) + ret <8 x half> %v +} + +define <8 x half> @vfma_vf_v8f16_unmasked(<8 x half> %va, half %b, <8 x half> %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x half> poison, half %b, i32 0 + %vb = shufflevector <8 x half> %elt.head, <8 x half> poison, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> poison, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x half> @llvm.vp.fmuladd.v8f16(<8 x half> %va, <8 x half> %vb, <8 x half> %vc, <8 x i1> %m, i32 %evl) + ret <8 x half> %v +} + +declare <16 x half> @llvm.vp.fmuladd.v16f16(<16 x half>, <16 x half>, <16 x half>, <16 x i1>, i32) + +define <16 x half> @vfma_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmadd.vv v10, v8, v12, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v = call <16 x half> @llvm.vp.fmuladd.v16f16(<16 x half> %va, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfma_vv_v16f16_unmasked(<16 x half> %va, <16 x half> %b, <16 x half> %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v10, v12 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> poison, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fmuladd.v16f16(<16 x half> %va, <16 x half> %b, <16 x half> %c, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfma_vf_v16f16(<16 x half> %va, half %b, <16 x half> %vc, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: 
vfma_vf_v16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> poison, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fmuladd.v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +define <16 x half> @vfma_vf_v16f16_unmasked(<16 x half> %va, half %b, <16 x half> %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x half> poison, half %b, i32 0 + %vb = shufflevector <16 x half> %elt.head, <16 x half> poison, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> poison, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x half> @llvm.vp.fmuladd.v16f16(<16 x half> %va, <16 x half> %vb, <16 x half> %vc, <16 x i1> %m, i32 %evl) + ret <16 x half> %v +} + +declare <2 x float> @llvm.vp.fmuladd.v2f32(<2 x float>, <2 x float>, <2 x float>, <2 x i1>, i32) + +define <2 x float> @vfma_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v = call <2 x float> @llvm.vp.fmuladd.v2f32(<2 x float> %va, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfma_vv_v2f32_unmasked(<2 x float> %va, <2 x float> %b, <2 x float> %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v9, v10 +; CHECK-NEXT: 
ret + %head = insertelement <2 x i1> poison, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fmuladd.v2f32(<2 x float> %va, <2 x float> %b, <2 x float> %c, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfma_vf_v2f32(<2 x float> %va, float %b, <2 x float> %vc, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> poison, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fmuladd.v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +define <2 x float> @vfma_vf_v2f32_unmasked(<2 x float> %va, float %b, <2 x float> %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x float> poison, float %b, i32 0 + %vb = shufflevector <2 x float> %elt.head, <2 x float> poison, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> poison, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x float> @llvm.vp.fmuladd.v2f32(<2 x float> %va, <2 x float> %vb, <2 x float> %vc, <2 x i1> %m, i32 %evl) + ret <2 x float> %v +} + +declare <4 x float> @llvm.vp.fmuladd.v4f32(<4 x float>, <4 x float>, <4 x float>, <4 x i1>, i32) + +define <4 x float> @vfma_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %v = call <4 x 
float> @llvm.vp.fmuladd.v4f32(<4 x float> %va, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfma_vv_v4f32_unmasked(<4 x float> %va, <4 x float> %b, <4 x float> %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> poison, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fmuladd.v4f32(<4 x float> %va, <4 x float> %b, <4 x float> %c, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfma_vf_v4f32(<4 x float> %va, float %b, <4 x float> %vc, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> poison, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fmuladd.v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +define <4 x float> @vfma_vf_v4f32_unmasked(<4 x float> %va, float %b, <4 x float> %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x float> poison, float %b, i32 0 + %vb = shufflevector <4 x float> %elt.head, <4 x float> poison, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> poison, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x float> @llvm.vp.fmuladd.v4f32(<4 x float> %va, <4 x float> %vb, <4 x float> %vc, <4 x i1> %m, i32 %evl) + ret <4 x float> %v +} + +declare <8 x float> 
@llvm.vp.fmuladd.v8f32(<8 x float>, <8 x float>, <8 x float>, <8 x i1>, i32) + +define <8 x float> @vfma_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmadd.vv v10, v8, v12, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v = call <8 x float> @llvm.vp.fmuladd.v8f32(<8 x float> %va, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfma_vv_v8f32_unmasked(<8 x float> %va, <8 x float> %b, <8 x float> %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v10, v12 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> poison, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fmuladd.v8f32(<8 x float> %va, <8 x float> %b, <8 x float> %c, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfma_vf_v8f32(<8 x float> %va, float %b, <8 x float> %vc, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> poison, float %b, i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fmuladd.v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +define <8 x float> @vfma_vf_v8f32_unmasked(<8 x float> %va, float %b, <8 x float> %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x float> poison, float %b, 
i32 0 + %vb = shufflevector <8 x float> %elt.head, <8 x float> poison, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> poison, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x float> @llvm.vp.fmuladd.v8f32(<8 x float> %va, <8 x float> %vb, <8 x float> %vc, <8 x i1> %m, i32 %evl) + ret <8 x float> %v +} + +declare <16 x float> @llvm.vp.fmuladd.v16f32(<16 x float>, <16 x float>, <16 x float>, <16 x i1>, i32) + +define <16 x float> @vfma_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmadd.vv v12, v8, v16, v0.t +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %v = call <16 x float> @llvm.vp.fmuladd.v16f32(<16 x float> %va, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfma_vv_v16f32_unmasked(<16 x float> %va, <16 x float> %b, <16 x float> %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v12, v16 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> poison, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fmuladd.v16f32(<16 x float> %va, <16 x float> %b, <16 x float> %c, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfma_vf_v16f32(<16 x float> %va, float %b, <16 x float> %vc, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> poison, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer + %v = call <16 x float> 
@llvm.vp.fmuladd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +define <16 x float> @vfma_vf_v16f32_unmasked(<16 x float> %va, float %b, <16 x float> %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x float> poison, float %b, i32 0 + %vb = shufflevector <16 x float> %elt.head, <16 x float> poison, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> poison, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x float> @llvm.vp.fmuladd.v16f32(<16 x float> %va, <16 x float> %vb, <16 x float> %vc, <16 x i1> %m, i32 %evl) + ret <16 x float> %v +} + +declare <2 x double> @llvm.vp.fmuladd.v2f64(<2 x double>, <2 x double>, <2 x double>, <2 x i1>, i32) + +define <2 x double> @vfma_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %v = call <2 x double> @llvm.vp.fmuladd.v2f64(<2 x double> %va, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfma_vv_v2f64_unmasked(<2 x double> %va, <2 x double> %b, <2 x double> %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> poison, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fmuladd.v2f64(<2 x double> %va, <2 x double> %b, <2 x double> %c, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + 
+define <2 x double> @vfma_vf_v2f64(<2 x double> %va, double %b, <2 x double> %vc, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> poison, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fmuladd.v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +define <2 x double> @vfma_vf_v2f64_unmasked(<2 x double> %va, double %b, <2 x double> %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement <2 x double> poison, double %b, i32 0 + %vb = shufflevector <2 x double> %elt.head, <2 x double> poison, <2 x i32> zeroinitializer + %head = insertelement <2 x i1> poison, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x double> @llvm.vp.fmuladd.v2f64(<2 x double> %va, <2 x double> %vb, <2 x double> %vc, <2 x i1> %m, i32 %evl) + ret <2 x double> %v +} + +declare <4 x double> @llvm.vp.fmuladd.v4f64(<4 x double>, <4 x double>, <4 x double>, <4 x i1>, i32) + +define <4 x double> @vfma_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmadd.vv v10, v8, v12, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v = call <4 x double> @llvm.vp.fmuladd.v4f64(<4 x double> %va, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfma_vv_v4f64_unmasked(<4 x double> %va, <4 x double> %b, <4 x double> %c, i32 zeroext %evl) { +; 
CHECK-LABEL: vfma_vv_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v10, v12 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> poison, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fmuladd.v4f64(<4 x double> %va, <4 x double> %b, <4 x double> %c, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfma_vf_v4f64(<4 x double> %va, double %b, <4 x double> %vc, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> poison, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fmuladd.v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +define <4 x double> @vfma_vf_v4f64_unmasked(<4 x double> %va, double %b, <4 x double> %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <4 x double> poison, double %b, i32 0 + %vb = shufflevector <4 x double> %elt.head, <4 x double> poison, <4 x i32> zeroinitializer + %head = insertelement <4 x i1> poison, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x double> @llvm.vp.fmuladd.v4f64(<4 x double> %va, <4 x double> %vb, <4 x double> %vc, <4 x i1> %m, i32 %evl) + ret <4 x double> %v +} + +declare <8 x double> @llvm.vp.fmuladd.v8f64(<8 x double>, <8 x double>, <8 x double>, <8 x i1>, i32) + +define <8 x double> @vfma_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 zeroext %evl) { +; 
CHECK-LABEL: vfma_vv_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmadd.vv v12, v8, v16, v0.t +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %v = call <8 x double> @llvm.vp.fmuladd.v8f64(<8 x double> %va, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfma_vv_v8f64_unmasked(<8 x double> %va, <8 x double> %b, <8 x double> %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v12, v16 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> poison, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fmuladd.v8f64(<8 x double> %va, <8 x double> %b, <8 x double> %c, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfma_vf_v8f64(<8 x double> %va, double %b, <8 x double> %vc, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> poison, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fmuladd.v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +define <8 x double> @vfma_vf_v8f64_unmasked(<8 x double> %va, double %b, <8 x double> %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x double> poison, double %b, i32 0 + %vb = shufflevector <8 x double> %elt.head, <8 x double> poison, <8 x i32> zeroinitializer + %head = insertelement <8 x i1> poison, i1 true, i32 0 + %m = 
shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x double> @llvm.vp.fmuladd.v8f64(<8 x double> %va, <8 x double> %vb, <8 x double> %vc, <8 x i1> %m, i32 %evl) + ret <8 x double> %v +} + +declare <15 x double> @llvm.vp.fmuladd.v15f64(<15 x double>, <15 x double>, <15 x double>, <15 x i1>, i32) + +define <15 x double> @vfma_vv_v15f64(<15 x double> %va, <15 x double> %b, <15 x double> %c, <15 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v15f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret + %v = call <15 x double> @llvm.vp.fmuladd.v15f64(<15 x double> %va, <15 x double> %b, <15 x double> %c, <15 x i1> %m, i32 %evl) + ret <15 x double> %v +} + +define <15 x double> @vfma_vv_v15f64_unmasked(<15 x double> %va, <15 x double> %b, <15 x double> %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v15f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v16, v24 +; CHECK-NEXT: ret + %head = insertelement <15 x i1> poison, i1 true, i32 0 + %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer + %v = call <15 x double> @llvm.vp.fmuladd.v15f64(<15 x double> %va, <15 x double> %b, <15 x double> %c, <15 x i1> %m, i32 %evl) + ret <15 x double> %v +} + +declare <16 x double> @llvm.vp.fmuladd.v16f64(<16 x double>, <16 x double>, <16 x double>, <16 x i1>, i32) + +define <16 x double> @vfma_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x double> %c, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: 
vfmadd.vv v16, v8, v24, v0.t +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret + %v = call <16 x double> @llvm.vp.fmuladd.v16f64(<16 x double> %va, <16 x double> %b, <16 x double> %c, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfma_vv_v16f64_unmasked(<16 x double> %va, <16 x double> %b, <16 x double> %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vle64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v16, v24 +; CHECK-NEXT: ret + %head = insertelement <16 x i1> poison, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fmuladd.v16f64(<16 x double> %va, <16 x double> %b, <16 x double> %c, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfma_vf_v16f64(<16 x double> %va, double %b, <16 x double> %vc, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> poison, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> poison, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fmuladd.v16f64(<16 x double> %va, <16 x double> %vb, <16 x double> %vc, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +define <16 x double> @vfma_vf_v16f64_unmasked(<16 x double> %va, double %b, <16 x double> %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_v16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement <16 x double> poison, double %b, i32 0 + %vb = shufflevector <16 x double> %elt.head, <16 x double> poison, <16 x i32> zeroinitializer + %head = insertelement <16 x i1> poison, i1 
true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x double> @llvm.vp.fmuladd.v16f64(<16 x double> %va, <16 x double> %vb, <16 x double> %vc, <16 x i1> %m, i32 %evl) + ret <16 x double> %v +} + +declare <32 x double> @llvm.vp.fmuladd.v32f64(<32 x double>, <32 x double>, <32 x double>, <32 x i1>, i32) + +define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x double> %c, <32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v32f64: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: li a3, 48 +; CHECK-NEXT: mul a1, a1, a3 +; CHECK-NEXT: sub sp, sp, a1 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu +; CHECK-NEXT: vslidedown.vi v0, v0, 2 +; CHECK-NEXT: addi a1, a2, 128 +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vle64.v v24, (a1) +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: li a3, 24 +; CHECK-NEXT: mul a1, a1, a3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: addi a1, a0, 128 +; CHECK-NEXT: vle64.v v24, (a1) +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: li a3, 40 +; CHECK-NEXT: mul a1, a1, a3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: addi a3, a4, -16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 5 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: li a1, 0 +; CHECK-NEXT: bltu a4, a3, .LBB50_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: mv a1, a3 +; CHECK-NEXT: .LBB50_2: +; CHECK-NEXT: vle64.v v8, (a2) +; 
CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: slli a2, a2, 4 +; CHECK-NEXT: add a2, sp, a2 +; CHECK-NEXT: addi a2, a2, 16 +; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: li a0, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: li a2, 24 +; CHECK-NEXT: mul a1, a1, a2 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vl8re8.v v24, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: li a2, 40 +; CHECK-NEXT: mul a1, a1, a2 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vfmadd.vv v16, v24, v8, v0.t +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: li a2, 40 +; CHECK-NEXT: mul a1, a1, a2 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: bltu a4, a0, .LBB50_4 +; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: li a4, 16 +; CHECK-NEXT: .LBB50_4: +; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 5 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li 
a1, 40 +; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 48 +; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %v = call <32 x double> @llvm.vp.fmuladd.v32f64(<32 x double> %va, <32 x double> %b, <32 x double> %c, <32 x i1> %m, i32 %evl) + ret <32 x double> %v +} + +define <32 x double> @vfma_vv_v32f64_unmasked(<32 x double> %va, <32 x double> %b, <32 x double> %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_v32f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: li a3, 24 +; CHECK-NEXT: mul a1, a1, a3 +; CHECK-NEXT: sub sp, sp, a1 +; CHECK-NEXT: addi a1, a2, 128 +; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu +; CHECK-NEXT: vle64.v v24, (a1) +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: addi a1, a0, 128 +; CHECK-NEXT: vle64.v v24, (a1) +; CHECK-NEXT: addi a3, a4, -16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 4 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: li a1, 0 +; CHECK-NEXT: bltu a4, a3, .LBB51_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: mv a1, a3 +; CHECK-NEXT: .LBB51_2: +; CHECK-NEXT: vle64.v v8, (a2) +; CHECK-NEXT: addi a2, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; CHECK-NEXT: vle64.v v0, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: li a0, 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload +; CHECK-NEXT: vfmadd.vv v24, 
v16, v8 +; CHECK-NEXT: bltu a4, a0, .LBB51_4 +; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: li a4, 16 +; CHECK-NEXT: .LBB51_4: +; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfmadd.vv v0, v16, v8 +; CHECK-NEXT: vmv.v.v v8, v0 +; CHECK-NEXT: vmv8r.v v16, v24 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 24 +; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %head = insertelement <32 x i1> poison, i1 true, i32 0 + %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer + %v = call <32 x double> @llvm.vp.fmuladd.v32f64(<32 x double> %va, <32 x double> %b, <32 x double> %c, <32 x i1> %m, i32 %evl) + ret <32 x double> %v +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll new file mode 100644 index 00000000000000..51857381f903e9 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll @@ -0,0 +1,7725 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s + +declare @llvm.vp.fmuladd.nxv1f16(, , , , i32) + +define @vfma_vv_nxv1f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.vp.fmuladd.nxv1f16( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define 
@vfma_vv_nxv1f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv1f16( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv1f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv1f16( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv1f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv1f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv1f16( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv1f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv1f16( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv1f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv1f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; 
CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv1f16( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmuladd.nxv2f16(, , , , i32) + +define @vfma_vv_nxv2f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.vp.fmuladd.nxv2f16( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vv_nxv2f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv2f16( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv2f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv2f16( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv2f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv2f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv2f16( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + 
+define @vfma_vf_nxv2f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv2f16( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv2f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv2f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv2f16( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmuladd.nxv4f16(, , , , i32) + +define @vfma_vv_nxv4f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.vp.fmuladd.nxv4f16( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vv_nxv4f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv4f16( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv4f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv4f16: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv4f16( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv4f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv4f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv4f16( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv4f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv4f16( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv4f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv4f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv4f16( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmuladd.nxv8f16(, , , , i32) + +define @vfma_vv_nxv8f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, 
a0, e16, m2, ta, mu +; CHECK-NEXT: vfmadd.vv v10, v8, v12, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v = call @llvm.vp.fmuladd.nxv8f16( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vv_nxv8f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v10, v12 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv8f16( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv8f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv8f16( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv8f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv8f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv8f16( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv8f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv8f16( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + 
+define @vfma_vf_nxv8f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv8f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv8f16( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmuladd.nxv16f16(, , , , i32) + +define @vfma_vv_nxv16f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmadd.vv v12, v8, v16, v0.t +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %v = call @llvm.vp.fmuladd.nxv16f16( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vv_nxv16f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v12, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv16f16( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv16f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv16f16( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv16f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv16f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, 
v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv16f16( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv16f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv16f16( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv16f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv16f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv16f16( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmuladd.nxv32f16(, , , , i32) + +define @vfma_vv_nxv32f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret + %v = call @llvm.vp.fmuladd.nxv32f16( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vv_nxv32f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v16, v24 +; CHECK-NEXT: ret + 
%head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv32f16( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv32f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv32f16( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv32f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv32f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv32f16( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv32f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv32f16( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv32f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv32f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = 
shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv32f16( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmuladd.nxv1f32(, , , , i32) + +define @vfma_vv_nxv1f32( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.vp.fmuladd.nxv1f32( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vv_nxv1f32_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv1f32( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv1f32( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv1f32( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv1f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv1f32_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv1f32( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv1f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmadd.vf 
v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv1f32( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv1f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv1f32_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv1f32( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmuladd.nxv2f32(, , , , i32) + +define @vfma_vv_nxv2f32( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.vp.fmuladd.nxv2f32( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vv_nxv2f32_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv2f32( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv2f32( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, 
zeroinitializer + %v = call @llvm.vp.fmuladd.nxv2f32( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv2f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv2f32_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv2f32( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv2f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv2f32( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv2f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv2f32_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv2f32( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmuladd.nxv4f32(, , , , i32) + +define @vfma_vv_nxv4f32( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmadd.vv v10, v8, v12, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v = call @llvm.vp.fmuladd.nxv4f32( %va, %b, %c, %m, i32 %evl) + ret %v +} + 
+define @vfma_vv_nxv4f32_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v10, v12 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv4f32( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv4f32( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv4f32( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv4f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv4f32_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv4f32( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv4f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv4f32( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv4f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv4f32_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, 
m2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv4f32( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmuladd.nxv8f32(, , , , i32) + +define @vfma_vv_nxv8f32( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmadd.vv v12, v8, v16, v0.t +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %v = call @llvm.vp.fmuladd.nxv8f32( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vv_nxv8f32_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v12, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv8f32( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv8f32( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv8f32( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv8f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv8f32_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv8f32( %vb, %va, %vc, %m, i32 
%evl) + ret %v +} + +define @vfma_vf_nxv8f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv8f32( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv8f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv8f32_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv8f32( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmuladd.nxv16f32(, , , , i32) + +define @vfma_vv_nxv16f32( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret + %v = call @llvm.vp.fmuladd.nxv16f32( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vv_nxv16f32_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v16, v24 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv16f32( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define 
@vfma_vf_nxv16f32( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv16f32( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv16f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv16f32_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv16f32( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv16f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv16f32( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv16f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv16f32_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv16f32( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmuladd.nxv1f64(, , , , i32) + 
+define @vfma_vv_nxv1f64( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %v = call @llvm.vp.fmuladd.nxv1f64( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vv_nxv1f64_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv1f64( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv1f64( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv1f64( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv1f64_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv1f64_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv1f64( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv1f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 
true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv1f64( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv1f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv1f64_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv1f64( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmuladd.nxv2f64(, , , , i32) + +define @vfma_vv_nxv2f64( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmadd.vv v10, v8, v12, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %v = call @llvm.vp.fmuladd.nxv2f64( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vv_nxv2f64_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v10, v12 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv2f64( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv2f64( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv2f64( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv2f64_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; 
CHECK-LABEL: vfma_vf_nxv2f64_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv2f64( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv2f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv2f64( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv2f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv2f64_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv2f64( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmuladd.nxv4f64(, , , , i32) + +define @vfma_vv_nxv4f64( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmadd.vv v12, v8, v16, v0.t +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %v = call @llvm.vp.fmuladd.nxv4f64( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vv_nxv4f64_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, 
ta, mu +; CHECK-NEXT: vfmadd.vv v8, v12, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv4f64( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv4f64( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv4f64( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv4f64_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv4f64_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv4f64( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv4f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv4f64( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv4f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv4f64_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, 
zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv4f64( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmuladd.nxv7f64(, , , , i32) + +define @vfma_vv_nxv7f64( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv7f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret + %v = call @llvm.vp.fmuladd.nxv7f64( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vv_nxv7f64_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv7f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v16, v24 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv7f64( %va, %b, %c, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmuladd.nxv8f64(, , , , i32) + +define @vfma_vv_nxv8f64( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vfmadd.vv v16, v8, v24, v0.t +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret + %v = call @llvm.vp.fmuladd.nxv8f64( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vv_nxv8f64_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vv_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vfmadd.vv v8, v16, v24 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv8f64( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv8f64( %va, double %b, %vc, %m, i32 zeroext 
%evl) { +; CHECK-LABEL: vfma_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv8f64( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv8f64_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv8f64_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv8f64( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv8f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv8f64( %va, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfma_vf_nxv8f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfma_vf_nxv8f64_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmadd.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv8f64( %vb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fmuladd.nxv16f64(, , , , i32) + +define @vfma_vv_nxv16f64( %va, %b, %c, %m, i32 zeroext %evl) 
{ +; CHECK-LABEL: vfma_vv_nxv16f64: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: li a3, 48 +; CHECK-NEXT: mul a1, a1, a3 +; CHECK-NEXT: sub sp, sp, a1 +; CHECK-NEXT: vmv1r.v v1, v0 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: li a3, 24 +; CHECK-NEXT: mul a1, a1, a3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 5 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: li a3, 0 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, mu +; CHECK-NEXT: slli a5, a1, 3 +; CHECK-NEXT: add a6, a2, a5 +; CHECK-NEXT: vl8re64.v v8, (a6) +; CHECK-NEXT: csrr a6, vlenb +; CHECK-NEXT: slli a6, a6, 3 +; CHECK-NEXT: add a6, sp, a6 +; CHECK-NEXT: addi a6, a6, 16 +; CHECK-NEXT: vs8r.v v8, (a6) # Unknown-size Folded Spill +; CHECK-NEXT: add a5, a0, a5 +; CHECK-NEXT: vl8re64.v v8, (a5) +; CHECK-NEXT: csrr a5, vlenb +; CHECK-NEXT: li a6, 40 +; CHECK-NEXT: mul a5, a5, a6 +; CHECK-NEXT: add a5, sp, a5 +; CHECK-NEXT: addi a5, a5, 16 +; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill +; CHECK-NEXT: srli a6, a1, 3 +; CHECK-NEXT: sub a5, a4, a1 +; CHECK-NEXT: vslidedown.vx v0, v0, a6 +; CHECK-NEXT: bltu a4, a5, .LBB92_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: mv a3, a5 +; CHECK-NEXT: .LBB92_2: +; CHECK-NEXT: vl8re64.v v8, (a2) +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: slli a2, a2, 4 +; CHECK-NEXT: add a2, sp, a2 +; CHECK-NEXT: addi a2, a2, 16 +; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; CHECK-NEXT: vl8re64.v v8, (a0) +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a2, 24 +; CHECK-NEXT: mul a0, a0, a2 +; CHECK-NEXT: 
add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a2, 40 +; CHECK-NEXT: mul a0, a0, a2 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a2, 40 +; CHECK-NEXT: mul a0, a0, a2 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill +; CHECK-NEXT: bltu a4, a1, .LBB92_4 +; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: mv a4, a1 +; CHECK-NEXT: .LBB92_4: +; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 5 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 40 +; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 48 +; CHECK-NEXT: mul a0, a0, a1 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %v = call @llvm.vp.fmuladd.nxv16f64( %va, %b, %c, %m, i32 %evl) + ret %v +} + +define @vfma_vv_nxv16f64_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: 
vfma_vv_nxv16f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: li a3, 24 +; CHECK-NEXT: mul a1, a1, a3 +; CHECK-NEXT: sub sp, sp, a1 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a3, a1, 3 +; CHECK-NEXT: add a5, a2, a3 +; CHECK-NEXT: vl8re64.v v24, (a5) +; CHECK-NEXT: csrr a5, vlenb +; CHECK-NEXT: slli a5, a5, 3 +; CHECK-NEXT: add a5, sp, a5 +; CHECK-NEXT: addi a5, a5, 16 +; CHECK-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill +; CHECK-NEXT: add a3, a0, a3 +; CHECK-NEXT: vl8re64.v v24, (a3) +; CHECK-NEXT: sub a5, a4, a1 +; CHECK-NEXT: csrr a3, vlenb +; CHECK-NEXT: slli a3, a3, 4 +; CHECK-NEXT: add a3, sp, a3 +; CHECK-NEXT: addi a3, a3, 16 +; CHECK-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; CHECK-NEXT: li a3, 0 +; CHECK-NEXT: bltu a4, a5, .LBB93_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: mv a3, a5 +; CHECK-NEXT: .LBB93_2: +; CHECK-NEXT: vl8re64.v v8, (a2) +; CHECK-NEXT: addi a2, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; CHECK-NEXT: vl8re64.v v0, (a0) +; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfmadd.vv v24, v16, v8 +; CHECK-NEXT: bltu a4, a1, .LBB93_4 +; CHECK-NEXT: # %bb.3: +; CHECK-NEXT: mv a4, a1 +; CHECK-NEXT: .LBB93_4: +; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8re8.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8re8.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vfmadd.vv v0, v16, v8 +; CHECK-NEXT: vmv.v.v v8, v0 +; CHECK-NEXT: vmv8r.v v16, v24 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: li a1, 24 +; CHECK-NEXT: mul a0, a0, a1 +; 
CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.fmuladd.nxv16f64( %va, %b, %c, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fneg.nxv1f16(, , i32) + +define @vfmsub_vv_nxv1f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmsub.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %negc = call @llvm.vp.fneg.nxv1f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vv_nxv1f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmsub.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negc = call @llvm.vp.fneg.nxv1f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv1f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv1f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv1f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv1f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, 
poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv1f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv1f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv1f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv1f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv1f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv1f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv1f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv1f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv1f16_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv1f16_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, 
a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv1f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv1f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv1f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv1f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv1f16_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv1f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f16_commute( %va, half %b, %vc, %m, i32 zeroext 
%evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f16_neg_splat( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f16_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli 
zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f16_neg_splat_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f16_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f16_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f16_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 
0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv1f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv1f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv1f16_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv1f16_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv1f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv1f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv1f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv1f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv1f16_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, 
mf4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv1f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define 
@vfnmsub_vf_nxv1f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f16_neg_splat( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f16_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f16_neg_splat_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f16_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f16_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, 
poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f16_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f16( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fneg.nxv2f16(, , i32) + +define @vfmsub_vv_nxv2f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmsub.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %negc = call @llvm.vp.fneg.nxv2f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vv_nxv2f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmsub.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negc = call @llvm.vp.fneg.nxv2f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv2f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; 
CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv2f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv2f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv2f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv2f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv2f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv2f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv2f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv2f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv2f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %vb, %va, %negvc, %m, i32 
%evl) + ret %v +} + +define @vfnmadd_vv_nxv2f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv2f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv2f16_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv2f16_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv2f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv2f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv2f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv2f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv2f16_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv2f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %negb, %va, %negc, %m, 
i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, 
ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f16_neg_splat( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f16_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f16_neg_splat_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f16_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f16_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, 
zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f16_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv2f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv2f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv2f16_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv2f16_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv2f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv2f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; 
CHECK-LABEL: vfnmsub_vv_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv2f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv2f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv2f16_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv2f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %vb, %negva, %vc, %m, i32 
%evl) + ret %v +} + +define @vfnmsub_vf_nxv2f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f16_neg_splat( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f16_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f16_neg_splat_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f16_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; 
CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f16_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f16_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f16( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fneg.nxv4f16(, , i32) + +define @vfmsub_vv_nxv4f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmsub.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %negc = call @llvm.vp.fneg.nxv4f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vv_nxv4f16_unmasked( %va, 
%b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmsub.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negc = call @llvm.vp.fneg.nxv4f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv4f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv4f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv4f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv4f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv4f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv4f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv4f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %va, %vb, %negvc, %m, i32 %evl) 
+ ret %v +} + +define @vfmsub_vf_nxv4f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv4f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv4f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv4f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv4f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv4f16_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv4f16_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv4f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv4f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv4f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f16( %c, %m, i32 %evl) + %v = call 
@llvm.vp.fmuladd.nxv4f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv4f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv4f16_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv4f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + 
%elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f16_neg_splat( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f16_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f16_neg_splat_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f16_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector 
%elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f16_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f16_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv4f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv4f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f16( %c, %m, i32 %evl) + %v = call 
@llvm.vp.fmuladd.nxv4f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv4f16_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv4f16_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv4f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv4f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv4f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv4f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv4f16_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv4f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f16( 
%va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f16_neg_splat( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f16_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, 
e16, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f16_neg_splat_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f16_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f16_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f16_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f16( 
%vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f16( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fneg.nxv8f16(, , i32) + +define @vfmsub_vv_nxv8f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmsub.vv v10, v8, v12, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %negc = call @llvm.vp.fneg.nxv8f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vv_nxv8f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmsub.vv v8, v10, v12 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negc = call @llvm.vp.fneg.nxv8f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv8f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv8f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv8f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv8f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv8f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %vb, %va, %negvc, %m, i32 %evl) + 
ret %v +} + +define @vfmsub_vf_nxv8f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv8f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv8f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv8f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv8f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv8f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v10, v8, v12, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv8f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv8f16_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv8f16_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv8f16( %b, %m, i32 %evl) + %negc = 
call @llvm.vp.fneg.nxv8f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv8f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v10, v12 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv8f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv8f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv8f16_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v10, v12 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv8f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, 
fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f16_neg_splat( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f16_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = 
shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f16_neg_splat_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f16_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f16_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f16_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, 
zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv8f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v10, v8, v12, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv8f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv8f16_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv8f16_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv8f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv8f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v10, v12 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv8f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv8f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv8f16_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v10, v12 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector 
%head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv8f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: 
vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f16_neg_splat( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f16_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f16_neg_splat_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f16_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f16_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f16( %vb, %m, 
i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f16_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f16( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fneg.nxv16f16(, , i32) + +define @vfmsub_vv_nxv16f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmsub.vv v12, v8, v16, v0.t +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %negc = call @llvm.vp.fneg.nxv16f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vv_nxv16f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmsub.vv v8, v12, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negc = call @llvm.vp.fneg.nxv16f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv16f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, 
zeroinitializer + %negvc = call @llvm.vp.fneg.nxv16f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv16f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv16f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv16f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv16f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv16f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv16f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv16f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv16f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv16f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv16f16: +; CHECK: # 
%bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v12, v8, v16, v0.t +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv16f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv16f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv16f16_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv16f16_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv16f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv16f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv16f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v12, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv16f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv16f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv16f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv16f16_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v12, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv16f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv16f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv16f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: 
vfnmadd_vf_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv16f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv16f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv16f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv16f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv16f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv16f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv16f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv16f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv16f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv16f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv16f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement 
poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv16f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv16f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv16f16_neg_splat( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv16f16_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv16f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv16f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv16f16_neg_splat_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv16f16_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv16f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv16f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv16f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv16f16_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector 
%head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv16f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv16f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv16f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv16f16_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv16f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv16f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv16f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v12, v8, v16, v0.t +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv16f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv16f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv16f16_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv16f16_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv16f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv16f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv16f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv16f16_unmasked: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v12, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv16f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv16f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv16f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv16f16_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v12, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv16f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv16f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv16f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv16f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv16f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv16f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv16f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv16f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define 
@vfnmsub_vf_nxv16f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv16f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv16f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv16f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv16f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv16f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv16f16_neg_splat( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv16f16_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv16f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv16f16_neg_splat_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv16f16_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: 
ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv16f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv16f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv16f16_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv16f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv16f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv16f16_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv16f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f16( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fneg.nxv32f16(, , i32) + +define @vfmsub_vv_nxv32f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vfmsub.vv v16, v8, v24, v0.t +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret + %negc = call @llvm.vp.fneg.nxv32f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %va, %b, %negc, %m, i32 %evl) + ret %v +} + 
+define @vfmsub_vv_nxv32f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vfmsub.vv v8, v16, v24 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negc = call @llvm.vp.fneg.nxv32f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv32f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv32f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv32f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv32f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv32f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv32f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call 
@llvm.vp.fneg.nxv32f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv32f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv32f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv32f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv32f16( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v16, v8, v24, v0.t +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv32f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv32f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv32f16_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv32f16_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv32f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv32f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv32f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v16, v24 +; 
CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv32f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv32f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv32f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv32f16_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v16, v24 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv32f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv32f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv32f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv32f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv32f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv32f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv32f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv32f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv32f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %vb, 
%negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv32f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv32f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv32f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv32f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv32f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv32f16( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv32f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv32f16_neg_splat( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv32f16_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv32f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv32f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define 
@vfnmadd_vf_nxv32f16_neg_splat_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv32f16_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv32f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv32f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv32f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv32f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv32f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv32f16( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv32f16( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv32f16( 
%va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v16, v8, v24, v0.t +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv32f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv32f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv32f16_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv32f16_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv32f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv32f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv32f16_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v16, v24 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv32f16( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv32f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv32f16_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv32f16_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re16.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v16, v24 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv32f16( %b, %m, i32 %evl) + %negc 
= call @llvm.vp.fneg.nxv32f16( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv32f16( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv32f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv32f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv32f16_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv32f16_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv32f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv32f16_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv32f16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv32f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv32f16_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv32f16_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 +; 
CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv32f16( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv32f16_neg_splat( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv32f16_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv32f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv32f16_neg_splat_commute( %va, half %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv32f16_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv32f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv32f16_neg_splat_unmasked( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv32f16_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv32f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %va, %negvb, 
%vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv32f16_neg_splat_unmasked_commute( %va, half %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv32f16_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, half %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv32f16( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv32f16( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fneg.nxv1f32(, , i32) + +define @vfmsub_vv_nxv1f32( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmsub.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %negc = call @llvm.vp.fneg.nxv1f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vv_nxv1f32_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmsub.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negc = call @llvm.vp.fneg.nxv1f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv1f32( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv1f32( %vc, %m, i32 %evl) + %v = 
call @llvm.vp.fmuladd.nxv1f32( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv1f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv1f32_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv1f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv1f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv1f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv1f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv1f32_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv1f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv1f32( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vv v9, v8, v10, 
v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv1f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv1f32_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv1f32_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv1f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv1f32_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv1f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv1f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv1f32_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv1f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f32( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vf 
v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f32( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f32_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f32( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f32( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f32_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 
+ %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f32( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f32_neg_splat( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f32_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f32( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f32_neg_splat_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f32_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f32( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f32_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f32( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f32( %vc, %m, 
i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f32_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f32( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv1f32( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv1r.v v8, v9 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv1f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv1f32_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv1f32_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv1f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv1f32_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = 
shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv1f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv1f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv1f32_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv1f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f32( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f32( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f32_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f32( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; 
CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f32( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f32_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f32( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f32_neg_splat( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f32_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f32( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f32_neg_splat_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f32_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f32( %vb, %m, i32 %evl) + %v = call 
@llvm.vp.fmuladd.nxv1f32( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f32_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f32( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f32_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f32( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f32( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fneg.nxv2f32(, , i32) + +define @vfmsub_vv_nxv2f32( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmsub.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %negc = call @llvm.vp.fneg.nxv2f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vv_nxv2f32_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmsub.vv v8, v9, v10 +; CHECK-NEXT: 
ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negc = call @llvm.vp.fneg.nxv2f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv2f32( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv2f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv2f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv2f32_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv2f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv2f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv2f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv2f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv2f32_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: 
vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv2f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv2f32( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv2f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv2f32_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv2f32_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv2f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv2f32_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv2f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv2f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: 
vfnmadd_vv_nxv2f32_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv2f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f32( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f32( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f32_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f32( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = 
shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f32( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f32_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f32( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f32_neg_splat( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f32_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f32( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f32_neg_splat_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f32_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f32( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f32( %vc, %m, i32 %evl) + 
%v = call @llvm.vp.fmuladd.nxv2f32( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f32_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f32( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f32_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f32( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv2f32( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv2f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv2f32_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; 
CHECK-LABEL: vfnmsub_vv_nxv2f32_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv2f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv2f32_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv2f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv2f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv2f32_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv2f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f32( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f32( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f32_commute( %va, 
float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f32_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f32( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f32( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f32_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f32( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f32_neg_splat( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f32_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = 
shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f32( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f32_neg_splat_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f32_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f32( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f32_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f32( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f32_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f32( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f32( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare 
@llvm.vp.fneg.nxv4f32(, , i32) + +define @vfmsub_vv_nxv4f32( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmsub.vv v10, v8, v12, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %negc = call @llvm.vp.fneg.nxv4f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vv_nxv4f32_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmsub.vv v8, v10, v12 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negc = call @llvm.vp.fneg.nxv4f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv4f32( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv4f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv4f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv4f32_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv4f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv4f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: 
vfmsub_vf_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv4f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv4f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv4f32_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv4f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv4f32( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v10, v8, v12, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv4f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv4f32_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv4f32_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv4f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %negb, %va, %negc, 
%m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv4f32_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v10, v12 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv4f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv4f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv4f32_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v10, v12 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv4f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f32( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f32( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f32_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = 
shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f32( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f32( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f32_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f32( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f32_neg_splat( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f32_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f32( 
%vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f32_neg_splat_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f32_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f32( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f32_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f32( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f32_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f32( %vb, %m, i32 %evl) + %negvc = call 
@llvm.vp.fneg.nxv4f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv4f32( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v10, v8, v12, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv4f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv4f32_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv4f32_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv4f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv4f32_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v10, v12 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv4f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv4f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv4f32_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v10, v12 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv4f32( %b, %m, i32 %evl) + 
%negc = call @llvm.vp.fneg.nxv4f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f32( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f32( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f32_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f32( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f32( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f32_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 +; CHECK-NEXT: 
ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f32( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f32_neg_splat( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f32_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f32( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f32_neg_splat_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f32_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f32( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f32_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f32( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %va, %negvb, %vc, %m, i32 %evl) 
+ ret %v +} + +define @vfnmsub_vf_nxv4f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f32_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f32( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f32( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fneg.nxv8f32(, , i32) + +define @vfmsub_vv_nxv8f32( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmsub.vv v12, v8, v16, v0.t +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %negc = call @llvm.vp.fneg.nxv8f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vv_nxv8f32_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmsub.vv v8, v12, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negc = call @llvm.vp.fneg.nxv8f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv8f32( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv8f32( %vc, %m, i32 %evl) + %v = call 
@llvm.vp.fmuladd.nxv8f32( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv8f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv8f32_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv8f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv8f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv8f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv8f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv8f32_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv8f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv8f32( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v12, v8, v16, v0.t 
+; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv8f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv8f32_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv8f32_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv8f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv8f32_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v12, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv8f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv8f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv8f32_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v12, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv8f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f32( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, 
fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f32( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f32_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f32( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f32( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f32_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + 
%m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f32( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f32_neg_splat( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f32_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f32( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f32_neg_splat_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f32_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f32( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f32_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f32( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f32( %vc, %m, 
i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f32_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f32( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv8f32( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v12, v8, v16, v0.t +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv8f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv8f32_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv8f32_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv8f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv8f32_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v12, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = 
shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv8f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv8f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv8f32_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v12, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv8f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f32( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f32( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f32_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f32( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; 
CHECK-NEXT: vfnmsub.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f32( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f32_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f32( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f32_neg_splat( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f32_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f32( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f32_neg_splat_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f32_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f32( %vb, %m, i32 %evl) + %v = call 
@llvm.vp.fmuladd.nxv8f32( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f32_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f32( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f32_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f32( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f32( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fneg.nxv16f32(, , i32) + +define @vfmsub_vv_nxv16f32( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vfmsub.vv v16, v8, v24, v0.t +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret + %negc = call @llvm.vp.fneg.nxv16f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vv_nxv16f32_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: 
vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vfmsub.vv v8, v16, v24 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negc = call @llvm.vp.fneg.nxv16f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv16f32( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv16f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv16f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv16f32_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv16f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv16f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv16f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv16f32_unmasked_commute( %va, float %b, %vc, 
i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv16f32_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv16f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv16f32( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v16, v8, v24, v0.t +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv16f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv16f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv16f32_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv16f32_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv16f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv16f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv16f32_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v16, v24 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv16f32( %b, %m, i32 %evl) + %negc = call 
@llvm.vp.fneg.nxv16f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv16f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv16f32_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v16, v24 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv16f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv16f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv16f32( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv16f32( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv16f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv16f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv16f32_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv16f32( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv16f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv16f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv16f32_unmasked: +; CHECK: # %bb.0: 
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv16f32( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv16f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv16f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv16f32_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv16f32( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv16f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv16f32_neg_splat( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv16f32_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv16f32( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv16f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv16f32_neg_splat_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv16f32_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; 
CHECK-NEXT: vfnmadd.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv16f32( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv16f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv16f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv16f32_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv16f32( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv16f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv16f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv16f32_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv16f32( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv16f32( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv16f32( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v16, 
v8, v24, v0.t +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv16f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv16f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv16f32_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv16f32_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv16f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv16f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv16f32_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v16, v24 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv16f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv16f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv16f32_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv16f32_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re32.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v16, v24 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv16f32( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv16f32( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv16f32( %va, float %b, %vc, %m, i32 zeroext %evl) 
{ +; CHECK-LABEL: vfnmsub_vf_nxv16f32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv16f32( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv16f32_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv16f32_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv16f32( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv16f32_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv16f32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv16f32( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv16f32_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv16f32_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, 
zeroinitializer + %negva = call @llvm.vp.fneg.nxv16f32( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv16f32_neg_splat( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv16f32_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv16f32( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv16f32_neg_splat_commute( %va, float %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv16f32_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv16f32( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv16f32_neg_splat_unmasked( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv16f32_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv16f32( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv16f32_neg_splat_unmasked_commute( %va, float %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv16f32_neg_splat_unmasked_commute: +; 
CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, float %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv16f32( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv16f32( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fneg.nxv1f64(, , i32) + +define @vfmsub_vv_nxv1f64( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmsub.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %negc = call @llvm.vp.fneg.nxv1f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vv_nxv1f64_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmsub.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negc = call @llvm.vp.fneg.nxv1f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv1f64( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv1f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv1f64_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: 
vfmsub_vf_nxv1f64_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv1f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv1f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv1f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv1f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv1f64_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv1f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv1f64( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv1f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f64( %c, %m, i32 %evl) + %v 
= call @llvm.vp.fmuladd.nxv1f64( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv1f64_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv1f64_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv1f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv1f64_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv1f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv1f64_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv1f64_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv1f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f64( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call 
@llvm.vp.fneg.nxv1f64( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f64_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f64_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f64( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f64( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f64_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f64( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f64( %vc, %m, 
i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f64_neg_splat( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f64_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f64( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f64_neg_splat_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f64_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f64( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f64_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f64( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv1f64_neg_splat_unmasked_commute( 
%va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv1f64_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f64( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv1f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv1f64( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v9, v8, v10, v0.t +; CHECK-NEXT: vmv.v.v v8, v9 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv1f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv1f64_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv1f64_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv1f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv1f64_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv1f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f64( %c, %m, i32 
%evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv1f64_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv1f64_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v9, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv1f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv1f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f64( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f64( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f64_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f64_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f64( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, 
zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f64( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f64_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv1f64( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f64_neg_splat( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f64_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f64( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f64_neg_splat_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f64_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f64( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext 
%evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f64_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f64( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv1f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv1f64_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v9 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv1f64( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv1f64( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fneg.nxv2f64(, , i32) + +define @vfmsub_vv_nxv2f64( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmsub.vv v10, v8, v12, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %negc = call @llvm.vp.fneg.nxv2f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vv_nxv2f64_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmsub.vv v8, v10, v12 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negc = call @llvm.vp.fneg.nxv2f64( %c, 
%m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv2f64( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv2f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv2f64_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv2f64_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv2f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv2f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv2f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv2f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv2f64_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double 
%b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv2f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv2f64( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v10, v8, v12, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv2f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv2f64_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv2f64_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv2f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv2f64_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v10, v12 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv2f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv2f64_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv2f64_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v10, v12 
+; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv2f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f64( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f64( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f64_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f64_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f64( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f64( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f64( 
%vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f64_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f64( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f64_neg_splat( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f64_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f64( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f64_neg_splat_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f64_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f64( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f64_neg_splat_unmasked( 
%va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f64_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f64( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv2f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv2f64_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f64( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv2f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv2f64( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v10, v8, v12, v0.t +; CHECK-NEXT: vmv.v.v v8, v10 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv2f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv2f64_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv2f64_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: 
vfnmadd.vv v8, v10, v12, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv2f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv2f64_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v10, v12 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv2f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv2f64_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv2f64_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v10, v12 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv2f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv2f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f64( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f64( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f64_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f64_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: 
vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f64( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f64( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f64_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv2f64( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f64_neg_splat( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f64_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f64( %vb, %m, i32 %evl) + 
%v = call @llvm.vp.fmuladd.nxv2f64( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f64_neg_splat_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f64_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f64( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f64_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f64( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv2f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv2f64_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv2f64( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv2f64( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fneg.nxv4f64(, , i32) + +define @vfmsub_vv_nxv4f64( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: 
vfmsub_vv_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmsub.vv v12, v8, v16, v0.t +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %negc = call @llvm.vp.fneg.nxv4f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vv_nxv4f64_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmsub.vv v8, v12, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negc = call @llvm.vp.fneg.nxv4f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv4f64( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv4f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv4f64_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv4f64_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv4f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv4f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: 
vfmsub.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv4f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv4f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv4f64_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv4f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv4f64( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v12, v8, v16, v0.t +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv4f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv4f64_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv4f64_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv4f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv4f64_unmasked( %va, %b, %c, i32 zeroext %evl) { +; 
CHECK-LABEL: vfnmadd_vv_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v12, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv4f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv4f64_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv4f64_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v12, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv4f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f64( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f64( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f64_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f64_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f64( %va, %m, i32 
%evl) + %negvc = call @llvm.vp.fneg.nxv4f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f64( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f64_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f64( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f64_neg_splat( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f64_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f64( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f64( %vc, %m, i32 %evl) + %v = call 
@llvm.vp.fmuladd.nxv4f64( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f64_neg_splat_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f64_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f64( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f64_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f64( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv4f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv4f64_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f64( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv4f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( 
%negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv4f64( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v12, v8, v16, v0.t +; CHECK-NEXT: vmv.v.v v8, v12 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv4f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv4f64_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv4f64_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v12, v16, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv4f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv4f64_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v12, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv4f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv4f64_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv4f64_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v12, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv4f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv4f64( %c, %m, i32 %evl) + %v = call 
@llvm.vp.fmuladd.nxv4f64( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f64( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f64( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f64_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f64_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f64( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f64( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f64_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + 
%vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv4f64( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f64_neg_splat( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f64_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f64( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f64_neg_splat_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f64_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f64( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv4f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f64_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f64( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define 
@vfnmsub_vf_nxv4f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv4f64_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv4f64( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv4f64( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.fneg.nxv8f64(, , i32) + +define @vfmsub_vv_nxv8f64( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vfmsub.vv v16, v8, v24, v0.t +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret + %negc = call @llvm.vp.fneg.nxv8f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vv_nxv8f64_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vv_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vfmsub.vv v8, v16, v24 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negc = call @llvm.vp.fneg.nxv8f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %va, %b, %negc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv8f64( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call 
@llvm.vp.fneg.nxv8f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv8f64_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv8f64_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv8f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv8f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv8f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %va, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfmsub_vf_nxv8f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfmsub_vf_nxv8f64_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfmsub.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvc = call @llvm.vp.fneg.nxv8f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %vb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv8f64( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re64.v v24, 
(a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v16, v8, v24, v0.t +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv8f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv8f64_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv8f64_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv8f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv8f64_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v16, v24 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv8f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vv_nxv8f64_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vv_nxv8f64_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v16, v24 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv8f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + 
+define @vfnmadd_vf_nxv8f64( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f64( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f64_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f64_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f64( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f64( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %negva, %vb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f64_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; 
CHECK-NEXT: vfnmadd.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f64( %va, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %vb, %negva, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f64_neg_splat( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f64_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f64( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f64_neg_splat_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f64_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f64( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f64_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, 
zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f64( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %va, %negvb, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmadd_vf_nxv8f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmadd_vf_nxv8f64_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfnmadd.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f64( %vb, %m, i32 %evl) + %negvc = call @llvm.vp.fneg.nxv8f64( %vc, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %negvb, %va, %negvc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv8f64( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v16, v8, v24, v0.t +; CHECK-NEXT: vmv.v.v v8, v16 +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv8f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv8f64_commuted( %va, %b, %c, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv8f64_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v16, v24, v0.t +; CHECK-NEXT: ret + %negb = call @llvm.vp.fneg.nxv8f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + 
+define @vfnmsub_vv_nxv8f64_unmasked( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v16, v24 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv8f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %va, %negb, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vv_nxv8f64_unmasked_commuted( %va, %b, %c, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vv_nxv8f64_unmasked_commuted: +; CHECK: # %bb.0: +; CHECK-NEXT: vl8re64.v v24, (a0) +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; CHECK-NEXT: vfnmadd.vv v8, v16, v24 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negb = call @llvm.vp.fneg.nxv8f64( %b, %m, i32 %evl) + %negc = call @llvm.vp.fneg.nxv8f64( %c, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %negb, %va, %negc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f64( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f64( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f64_commute( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f64_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector 
%elt.head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f64( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f64_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f64( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %negva, %vb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f64_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f64_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negva = call @llvm.vp.fneg.nxv8f64( %va, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %vb, %negva, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f64_neg_splat( %va, double %b, %vc, %m, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f64_neg_splat: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f64( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f64_neg_splat_commute( %va, double %b, %vc, %m, i32 zeroext %evl) 
{ +; CHECK-LABEL: vfnmsub_vf_nxv8f64_neg_splat_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f64( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f64_neg_splat_unmasked( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f64_neg_splat_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f64( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %va, %negvb, %vc, %m, i32 %evl) + ret %v +} + +define @vfnmsub_vf_nxv8f64_neg_splat_unmasked_commute( %va, double %b, %vc, i32 zeroext %evl) { +; CHECK-LABEL: vfnmsub_vf_nxv8f64_neg_splat_unmasked_commute: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vfnmsub.vf v8, fa0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, double %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %negvb = call @llvm.vp.fneg.nxv8f64( %vb, %m, i32 %evl) + %v = call @llvm.vp.fmuladd.nxv8f64( %negvb, %va, %vc, %m, i32 %evl) + ret %v +} diff --git a/llvm/unittests/IR/VPIntrinsicTest.cpp b/llvm/unittests/IR/VPIntrinsicTest.cpp index 7be326d17559b5..520d3b22922c7d 100644 --- a/llvm/unittests/IR/VPIntrinsicTest.cpp +++ b/llvm/unittests/IR/VPIntrinsicTest.cpp @@ -60,6 +60,8 @@ class VPIntrinsicTest : public testing::Test { "i32)"; Str << 
" declare <8 x float> @llvm.vp.fma.v8f32(<8 x float>, <8 x float>, " "<8 x float>, <8 x i1>, i32) "; + Str << " declare <8 x float> @llvm.vp.fmuladd.v8f32(<8 x float>, " + "<8 x float>, <8 x float>, <8 x i1>, i32) "; Str << " declare void @llvm.vp.store.v8i32.p0v8i32(<8 x i32>, <8 x i32>*, " "<8 x i1>, i32) ";