diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td
index f7d1a099c69d9..b9c5b75983b1f 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvfbf.td
@@ -668,4 +668,38 @@ foreach vti = NoGroupBF16Vectors in {
   def : Pat<(vti.Scalar (extractelt (vti.Vector vti.RegClass:$rs2), 0)),
             (vfmv_f_s_inst vti.RegClass:$rs2, vti.Log2SEW)>;
 }
+
+let Predicates = [HasStdExtZvfbfa] in {
+  foreach fvtiToFWti = AllWidenableBF16ToFloatVectors in {
+    defvar fvti = fvtiToFWti.Vti;
+    defvar fwti = fvtiToFWti.Wti;
+    def : Pat<(fwti.Vector (any_riscv_fpextend_vl
+                               (fvti.Vector fvti.RegClass:$rs1),
+                               (fvti.Mask VMV0:$vm),
+                               VLOpFrag)),
+              (!cast<Instruction>("PseudoVFWCVT_F_F_ALT_V_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
+                  (fwti.Vector (IMPLICIT_DEF)), fvti.RegClass:$rs1,
+                  (fvti.Mask VMV0:$vm),
+                  GPR:$vl, fvti.Log2SEW, TA_MA)>;
+
+    def : Pat<(fvti.Vector (any_riscv_fpround_vl
+                               (fwti.Vector fwti.RegClass:$rs1),
+                               (fwti.Mask VMV0:$vm), VLOpFrag)),
+              (!cast<Instruction>("PseudoVFNCVT_F_F_ALT_W_"#fvti.LMul.MX#"_E"#fvti.SEW#"_MASK")
+                  (fvti.Vector (IMPLICIT_DEF)), fwti.RegClass:$rs1,
+                  (fwti.Mask VMV0:$vm),
+                  // Value to indicate no rounding mode change in
+                  // RISCVInsertReadWriteCSR
+                  FRM_DYN,
+                  GPR:$vl, fvti.Log2SEW, TA_MA)>;
+    def : Pat<(fvti.Vector (fpround (fwti.Vector fwti.RegClass:$rs1))),
+              (!cast<Instruction>("PseudoVFNCVT_F_F_ALT_W_"#fvti.LMul.MX#"_E"#fvti.SEW)
+                  (fvti.Vector (IMPLICIT_DEF)),
+                  fwti.RegClass:$rs1,
+                  // Value to indicate no rounding mode change in
+                  // RISCVInsertReadWriteCSR
+                  FRM_DYN,
+                  fvti.AVL, fvti.Log2SEW, TA_MA)>;
+  }
+}
 } // Predicates = [HasStdExtZvfbfa]
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll
index 061b2b0c5ab37..abd00b647e374 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-sdnode.ll
@@ -11,33 +11,80 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
 ; RUN:   -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
 ; RUN:   --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+zvfh,+experimental-zvfbfa,+v \
+; RUN:   -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:   --check-prefixes=CHECK,ZVFBFA
 
 define <vscale x 1 x bfloat> @vfadd_vv_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb) {
-; CHECK-LABEL: vfadd_vv_nxv1bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v9
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfadd.vv v9, v9, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vv_nxv1bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v10, v9
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; ZVFH-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT:    vfadd.vv v9, v9, v10
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv1bf16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v10, v9
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v9, v9, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv1bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v9, v9, v10
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT:    ret
   %vc = fadd <vscale x 1 x bfloat> %va, %vb
   ret <vscale x 1 x bfloat> %vc
 }
 
 define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16(<vscale x 1 x bfloat> %va, bfloat %b) {
-; CHECK-LABEL: vfadd_vf_nxv1bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    fcvt.s.bf16 fa5, fa0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfadd.vf v9, v9, fa5
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vf_nxv1bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; ZVFH-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT:    vfadd.vf v9, v9, fa5
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv1bf16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vf v9, v9, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT:    vfadd.vf v9, v9, fa5
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT:    ret
   %head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0
   %splat = shufflevector <vscale x 1 x bfloat> %head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer
   %vc = fadd <vscale x 1 x bfloat> %va, %splat
@@ -45,31 +92,75 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16(<vscale x 1 x bfloat> %va, bfloa
 }
 
 define <vscale x 2 x bfloat> @vfadd_vv_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb) {
-; CHECK-LABEL: vfadd_vv_nxv2bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v9
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfadd.vv v9, v9, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vv_nxv2bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v10, v9
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT:    vfadd.vv v9, v9, v10
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv2bf16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v10, v9
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v9, v9, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv2bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v9, v9, v10
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT:    ret
   %vc = fadd <vscale x 2 x bfloat> %va, %vb
   ret <vscale x 2 x bfloat> %vc
 }
 
 define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16(<vscale x 2 x bfloat> %va, bfloat %b) {
-; CHECK-LABEL: vfadd_vf_nxv2bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    fcvt.s.bf16 fa5, fa0
-; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfadd.vf v9, v9, fa5
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vf_nxv2bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT:    vfadd.vf v9, v9, fa5
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv2bf16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vf v9, v9, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv2bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT:    vfadd.vf v9, v9, fa5
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT:    ret
   %head = insertelement <vscale x 2 x bfloat> poison, bfloat %b, i32 0
   %splat = shufflevector <vscale x 2 x bfloat> %head, <vscale x 2 x bfloat> poison, <vscale x 2 x i32> zeroinitializer
   %vc = fadd <vscale x 2 x bfloat> %va, %splat
@@ -77,31 +168,75 @@ define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16(<vscale x 2 x bfloat> %va, bfloa
 }
 
 define <vscale x 4 x bfloat> @vfadd_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %vb) {
-; CHECK-LABEL: vfadd_vv_nxv4bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v9
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vfadd.vv v10, v12, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vv_nxv4bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v10, v9
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFH-NEXT:    vfadd.vv v10, v12, v10
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v10
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv4bf16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v10, v9
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v10, v12, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v10
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv4bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v10, v12, v10
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v10
+; ZVFBFA-NEXT:    ret
   %vc = fadd <vscale x 4 x bfloat> %va, %vb
   ret <vscale x 4 x bfloat> %vc
 }
 
 define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16(<vscale x 4 x bfloat> %va, bfloat %b) {
-; CHECK-LABEL: vfadd_vf_nxv4bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    fcvt.s.bf16 fa5, fa0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vfadd.vf v10, v10, fa5
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vf_nxv4bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFH-NEXT:    vfadd.vf v10, v10, fa5
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v10
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv4bf16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vf v10, v10, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v10
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv4bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT:    vfadd.vf v10, v10, fa5
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v10
+; ZVFBFA-NEXT:    ret
   %head = insertelement <vscale x 4 x bfloat> poison, bfloat %b, i32 0
   %splat = shufflevector <vscale x 4 x bfloat> %head, <vscale x 4 x bfloat> poison, <vscale x 4 x i32> zeroinitializer
   %vc = fadd <vscale x 4 x bfloat> %va, %splat
@@ -109,31 +244,75 @@ define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16(<vscale x 4 x bfloat> %va, bfloa
 }
 
 define <vscale x 8 x bfloat> @vfadd_vv_nxv8bf16(<vscale x 8 x bfloat> %va, <vscale x 8 x bfloat> %vb) {
-; CHECK-LABEL: vfadd_vv_nxv8bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v10
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vfadd.vv v12, v16, v12
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vv_nxv8bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v12, v10
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVFH-NEXT:    vfadd.vv v12, v16, v12
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v12
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv8bf16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v12, v10
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v12, v16, v12
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v12
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv8bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v12, v10
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v12, v16, v12
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v12
+; ZVFBFA-NEXT:    ret
   %vc = fadd <vscale x 8 x bfloat> %va, %vb
   ret <vscale x 8 x bfloat> %vc
 }
 
 define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat %b) {
-; CHECK-LABEL: vfadd_vf_nxv8bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    fcvt.s.bf16 fa5, fa0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vfadd.vf v12, v12, fa5
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vf_nxv8bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVFH-NEXT:    vfadd.vf v12, v12, fa5
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v12
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv8bf16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vf v12, v12, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v12
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv8bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT:    vfadd.vf v12, v12, fa5
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v12
+; ZVFBFA-NEXT:    ret
   %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
   %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
   %vc = fadd <vscale x 8 x bfloat> %va, %splat
@@ -141,16 +320,38 @@ define <vscale x 8 x bfloat> @vfadd_vf_nxv8bf16(<vscale x 8 x bfloat> %va, bfloa
 }
 
 define <vscale x 8 x bfloat> @vfadd_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloat %b) {
-; CHECK-LABEL: vfadd_fv_nxv8bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    fcvt.s.bf16 fa5, fa0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vfadd.vf v12, v12, fa5
-; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v12
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_fv_nxv8bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVFH-NEXT:    vfadd.vf v12, v12, fa5
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v12
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_fv_nxv8bf16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vf v12, v12, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v12
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_fv_nxv8bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT:    vfadd.vf v12, v12, fa5
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v12
+; ZVFBFA-NEXT:    ret
   %head = insertelement <vscale x 8 x bfloat> poison, bfloat %b, i32 0
   %splat = shufflevector <vscale x 8 x bfloat> %head, <vscale x 8 x bfloat> poison, <vscale x 8 x i32> zeroinitializer
   %vc = fadd <vscale x 8 x bfloat> %splat, %va
@@ -158,31 +359,75 @@ define <vscale x 8 x bfloat> @vfadd_fv_nxv8bf16(<vscale x 8 x bfloat> %va, bfloa
 }
 
 define <vscale x 16 x bfloat> @vfadd_vv_nxv16bf16(<vscale x 16 x bfloat> %va, <vscale x 16 x bfloat> %vb) {
-; CHECK-LABEL: vfadd_vv_nxv16bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v12
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfadd.vv v16, v24, v16
-; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vv_nxv16bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v16, v12
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v24, v8
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT:    vfadd.vv v16, v24, v16
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v16
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv16bf16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v16, v12
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v24, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v16, v24, v16
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v16
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv16bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v16, v12
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v24, v8
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v16, v24, v16
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT:    ret
   %vc = fadd <vscale x 16 x bfloat> %va, %vb
   ret <vscale x 16 x bfloat> %vc
 }
 
 define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16(<vscale x 16 x bfloat> %va, bfloat %b) {
-; CHECK-LABEL: vfadd_vf_nxv16bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    fcvt.s.bf16 fa5, fa0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfadd.vf v16, v16, fa5
-; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v16
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vf_nxv16bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT:    vfadd.vf v16, v16, fa5
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v16
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv16bf16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vf v16, v16, fa5
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v16
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv16bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    fcvt.s.bf16 fa5, fa0
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT:    vfadd.vf v16, v16, fa5
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT:    ret
   %head = insertelement <vscale x 16 x bfloat> poison, bfloat %b, i32 0
   %splat = shufflevector <vscale x 16 x bfloat> %head, <vscale x 16 x bfloat> poison, <vscale x 16 x i32> zeroinitializer
   %vc = fadd <vscale x 16 x bfloat> %va, %splat
@@ -190,78 +435,216 @@ define <vscale x 16 x bfloat> @vfadd_vf_nxv16bf16(<vscale x 16 x bfloat> %va, bf
 }
 
 define <vscale x 32 x bfloat> @vfadd_vv_nxv32bf16(<vscale x 32 x bfloat> %va, <vscale x 32 x bfloat> %vb) {
-; CHECK-LABEL: vfadd_vv_nxv32bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    sub sp, sp, a0
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v16
-; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v0, v8
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v20
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v12
-; CHECK-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfadd.vv v0, v0, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfadd.vv v16, v16, v24
-; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v16
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    add sp, sp, a0
-; CHECK-NEXT:    .cfi_def_cfa sp, 16
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vv_nxv32bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    addi sp, sp, -16
+; ZVFH-NEXT:    .cfi_def_cfa_offset 16
+; ZVFH-NEXT:    csrr a0, vlenb
+; ZVFH-NEXT:    slli a0, a0, 3
+; ZVFH-NEXT:    sub sp, sp, a0
+; ZVFH-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v24, v16
+; ZVFH-NEXT:    addi a0, sp, 16
+; ZVFH-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v0, v8
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v24, v20
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v16, v12
+; ZVFH-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT:    vfadd.vv v0, v0, v8
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v0
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT:    vfadd.vv v16, v16, v24
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v12, v16
+; ZVFH-NEXT:    csrr a0, vlenb
+; ZVFH-NEXT:    slli a0, a0, 3
+; ZVFH-NEXT:    add sp, sp, a0
+; ZVFH-NEXT:    .cfi_def_cfa sp, 16
+; ZVFH-NEXT:    addi sp, sp, 16
+; ZVFH-NEXT:    .cfi_def_cfa_offset 0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv32bf16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    addi sp, sp, -16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT:    csrr a0, vlenb
+; ZVFHMIN-NEXT:    slli a0, a0, 3
+; ZVFHMIN-NEXT:    sub sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v24, v16
+; ZVFHMIN-NEXT:    addi a0, sp, 16
+; ZVFHMIN-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v0, v8
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v24, v20
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v16, v12
+; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v0, v0, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v0
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v16, v16, v24
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v12, v16
+; ZVFHMIN-NEXT:    csrr a0, vlenb
+; ZVFHMIN-NEXT:    slli a0, a0, 3
+; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
+; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv32bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    addi sp, sp, -16
+; ZVFBFA-NEXT:    .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT:    csrr a0, vlenb
+; ZVFBFA-NEXT:    slli a0, a0, 3
+; ZVFBFA-NEXT:    sub sp, sp, a0
+; ZVFBFA-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v24, v16
+; ZVFBFA-NEXT:    addi a0, sp, 16
+; ZVFBFA-NEXT:    vs8r.v v24, (a0) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v0, v8
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v24, v20
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v16, v12
+; ZVFBFA-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v0, v0, v8
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v0
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v16, v16, v24
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v12, v16
+; ZVFBFA-NEXT:    csrr a0, vlenb
+; ZVFBFA-NEXT:    slli a0, a0, 3
+; ZVFBFA-NEXT:    add sp, sp, a0
+; ZVFBFA-NEXT:    .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT:    addi sp, sp, 16
+; ZVFBFA-NEXT:    .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT:    ret
   %vc = fadd <vscale x 32 x bfloat> %va, %vb
   ret <vscale x 32 x bfloat> %vc
 }
 
 define <vscale x 32 x bfloat> @vfadd_vf_nxv32bf16(<vscale x 32 x bfloat> %va, bfloat %b) {
-; CHECK-LABEL: vfadd_vf_nxv32bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    sub sp, sp, a0
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT:    fmv.x.h a0, fa0
-; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v8
-; CHECK-NEXT:    addi a1, sp, 16
-; CHECK-NEXT:    vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v24, v12
-; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vmv.v.x v8, a0
-; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v0, v8
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v16, v12
-; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfadd.vv v0, v8, v0
-; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v0
-; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfadd.vv v16, v24, v16
-; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v12, v16
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    add sp, sp, a0
-; CHECK-NEXT:    .cfi_def_cfa sp, 16
-; CHECK-NEXT:    addi sp, sp, 16
-; CHECK-NEXT:    .cfi_def_cfa_offset 0
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vf_nxv32bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    addi sp, sp, -16
+; ZVFH-NEXT:    .cfi_def_cfa_offset 16
+; ZVFH-NEXT:    csrr a0, vlenb
+; ZVFH-NEXT:    slli a0, a0, 3
+; ZVFH-NEXT:    sub sp, sp, a0
+; ZVFH-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFH-NEXT:    fmv.x.h a0, fa0
+; ZVFH-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; ZVFH-NEXT:    addi a1, sp, 16
+; ZVFH-NEXT:    vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v24, v12
+; ZVFH-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; ZVFH-NEXT:    vmv.v.x v8, a0
+; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v0, v8
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v16, v12
+; ZVFH-NEXT:    addi a0, sp, 16
+; ZVFH-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT:    vfadd.vv v0, v8, v0
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v0
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFH-NEXT:    vfadd.vv v16, v24, v16
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v12, v16
+; ZVFH-NEXT:    csrr a0, vlenb
+; ZVFH-NEXT:    slli a0, a0, 3
+; ZVFH-NEXT:    add sp, sp, a0
+; ZVFH-NEXT:    .cfi_def_cfa sp, 16
+; ZVFH-NEXT:    addi sp, sp, 16
+; ZVFH-NEXT:    .cfi_def_cfa_offset 0
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv32bf16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    addi sp, sp, -16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT:    csrr a0, vlenb
+; ZVFHMIN-NEXT:    slli a0, a0, 3
+; ZVFHMIN-NEXT:    sub sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v16, v8
+; ZVFHMIN-NEXT:    addi a1, sp, 16
+; ZVFHMIN-NEXT:    vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v24, v12
+; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v8, a0
+; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v0, v8
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v16, v12
+; ZVFHMIN-NEXT:    addi a0, sp, 16
+; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v0, v8, v0
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v0
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v16, v24, v16
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v12, v16
+; ZVFHMIN-NEXT:    csrr a0, vlenb
+; ZVFHMIN-NEXT:    slli a0, a0, 3
+; ZVFHMIN-NEXT:    add sp, sp, a0
+; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
+; ZVFHMIN-NEXT:    addi sp, sp, 16
+; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv32bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    addi sp, sp, -16
+; ZVFBFA-NEXT:    .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT:    csrr a0, vlenb
+; ZVFBFA-NEXT:    slli a0, a0, 3
+; ZVFBFA-NEXT:    sub sp, sp, a0
+; ZVFBFA-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFBFA-NEXT:    fmv.x.h a0, fa0
+; ZVFBFA-NEXT:    vsetvli a1, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT:    addi a1, sp, 16
+; ZVFBFA-NEXT:    vs8r.v v16, (a1) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v24, v12
+; ZVFBFA-NEXT:    vsetvli a1, zero, e16alt, m8, ta, ma
+; ZVFBFA-NEXT:    vmv.v.x v8, a0
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v0, v8
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v16, v12
+; ZVFBFA-NEXT:    addi a0, sp, 16
+; ZVFBFA-NEXT:    vl8r.v v8, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v0, v8, v0
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v0
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v16, v24, v16
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v12, v16
+; ZVFBFA-NEXT:    csrr a0, vlenb
+; ZVFBFA-NEXT:    slli a0, a0, 3
+; ZVFBFA-NEXT:    add sp, sp, a0
+; ZVFBFA-NEXT:    .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT:    addi sp, sp, 16
+; ZVFBFA-NEXT:    .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT:    ret
   %head = insertelement <vscale x 32 x bfloat> poison, bfloat %b, i32 0
   %splat = shufflevector <vscale x 32 x bfloat> %head, <vscale x 32 x bfloat> poison, <vscale x 32 x i32> zeroinitializer
   %vc = fadd <vscale x 32 x bfloat> %va, %splat
@@ -285,6 +668,12 @@ define <vscale x 1 x half> @vfadd_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv1f16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v8, v8, v9
+; ZVFBFA-NEXT:    ret
   %vc = fadd <vscale x 1 x half> %va, %vb
   ret <vscale x 1 x half> %vc
 }
@@ -306,6 +695,12 @@ define <vscale x 1 x half> @vfadd_vf_nxv1f16(<vscale x 1 x half> %va, half %b) {
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1f16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT:    vfadd.vf v8, v8, fa0
+; ZVFBFA-NEXT:    ret
   %head = insertelement <vscale x 1 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
   %vc = fadd <vscale x 1 x half> %va, %splat
@@ -329,6 +724,12 @@ define <vscale x 2 x half> @vfadd_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv2f16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v8, v8, v9
+; ZVFBFA-NEXT:    ret
   %vc = fadd <vscale x 2 x half> %va, %vb
   ret <vscale x 2 x half> %vc
 }
@@ -350,6 +751,12 @@ define <vscale x 2 x half> @vfadd_vf_nxv2f16(<vscale x 2 x half> %va, half %b) {
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv2f16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
+; ZVFBFA-NEXT:    vfadd.vf v8, v8, fa0
+; ZVFBFA-NEXT:    ret
   %head = insertelement <vscale x 2 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
   %vc = fadd <vscale x 2 x half> %va, %splat
@@ -373,6 +780,12 @@ define <vscale x 4 x half> @vfadd_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv4f16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v8, v8, v9
+; ZVFBFA-NEXT:    ret
   %vc = fadd <vscale x 4 x half> %va, %vb
   ret <vscale x 4 x half> %vc
 }
@@ -394,6 +807,12 @@ define <vscale x 4 x half> @vfadd_vf_nxv4f16(<vscale x 4 x half> %va, half %b) {
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv4f16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT:    vfadd.vf v8, v8, fa0
+; ZVFBFA-NEXT:    ret
   %head = insertelement <vscale x 4 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
   %vc = fadd <vscale x 4 x half> %va, %splat
@@ -417,6 +836,12 @@ define <vscale x 8 x half> @vfadd_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv8f16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v8, v8, v10
+; ZVFBFA-NEXT:    ret
   %vc = fadd <vscale x 8 x half> %va, %vb
   ret <vscale x 8 x half> %vc
 }
@@ -438,6 +863,12 @@ define <vscale x 8 x half> @vfadd_vf_nxv8f16(<vscale x 8 x half> %va, half %b) {
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv8f16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT:    vfadd.vf v8, v8, fa0
+; ZVFBFA-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
   %vc = fadd <vscale x 8 x half> %va, %splat
@@ -461,6 +892,12 @@ define <vscale x 8 x half> @vfadd_fv_nxv8f16(<vscale x 8 x half> %va, half %b) {
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_fv_nxv8f16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT:    vfadd.vf v8, v8, fa0
+; ZVFBFA-NEXT:    ret
   %head = insertelement <vscale x 8 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
   %vc = fadd <vscale x 8 x half> %splat, %va
@@ -484,6 +921,12 @@ define <vscale x 16 x half> @vfadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv16f16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v8, v8, v12
+; ZVFBFA-NEXT:    ret
   %vc = fadd <vscale x 16 x half> %va, %vb
   ret <vscale x 16 x half> %vc
 }
@@ -505,6 +948,12 @@ define <vscale x 16 x half> @vfadd_vf_nxv16f16(<vscale x 16 x half> %va, half %b
 ; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv16f16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT:    vfadd.vf v8, v8, fa0
+; ZVFBFA-NEXT:    ret
   %head = insertelement <vscale x 16 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
   %vc = fadd <vscale x 16 x half> %va, %splat
@@ -549,6 +998,12 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv32f16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v8, v8, v16
+; ZVFBFA-NEXT:    ret
   %vc = fadd <vscale x 32 x half> %va, %vb
   ret <vscale x 32 x half> %vc
 }
@@ -596,6 +1051,12 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b
 ; ZVFHMIN-NEXT:    addi sp, sp, 16
 ; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv32f16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
+; ZVFBFA-NEXT:    vfadd.vf v8, v8, fa0
+; ZVFBFA-NEXT:    ret
   %head = insertelement <vscale x 32 x half> poison, half %b, i32 0
   %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
   %vc = fadd <vscale x 32 x half> %va, %splat
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
index 32e3d6bc663be..633a201c0131a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
@@ -11,52 +11,125 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfhmin,+zvfhmin,+zfbfmin,+zvfbfmin,+v \
 ; RUN:   -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
 ; RUN:   --check-prefixes=CHECK,ZVFHMIN
+; RUN: llc -mtriple=riscv64 -mattr=+zvfhmin,+experimental-zvfbfa,+v \
+; RUN:   -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s \
+; RUN:   --check-prefixes=CHECK,ZVFBFA
 
 declare <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat>, <vscale x 1 x bfloat>, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x bfloat> @vfadd_vv_nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv1bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v9, v0.t
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfadd.vv v9, v9, v10, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vv_nxv1bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v10, v9, v0.t
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT:    vfadd.vv v9, v9, v10, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv1bf16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v10, v9, v0.t
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v9, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v9, v9, v10, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv1bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v9, v0.t
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v9, v8, v0.t
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v9, v9, v10, v0.t
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT:    ret
   %v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %b, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x bfloat> %v
 }
 
 define <vscale x 1 x bfloat> @vfadd_vv_nxv1bf16_unmasked(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv1bf16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v9
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfadd.vv v9, v9, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vv_nxv1bf16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v10, v9
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; ZVFH-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT:    vfadd.vv v9, v9, v10
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv1bf16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v10, v9
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v9, v9, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv1bf16_unmasked:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v9, v9, v10
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT:    ret
   %v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %b, <vscale x 1 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 1 x bfloat> %v
 }
 
 define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16(<vscale x 1 x bfloat> %va, bfloat %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv1bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmv.x.h a1, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmv.v.x v9, a1
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfadd.vv v9, v10, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vf_nxv1bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fmv.x.h a1, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vmv.v.x v9, a1
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v8, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT:    vfadd.vv v9, v10, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv1bf16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v9, a1
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v9, v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    fmv.x.h a1, fa0
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT:    vmv.v.x v9, a1
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v9, v10, v8, v0.t
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0
   %vb = shufflevector <vscale x 1 x bfloat> %elt.head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer
   %v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb, <vscale x 1 x i1> %m, i32 %evl)
@@ -64,18 +137,44 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16(<vscale x 1 x bfloat> %va, bfloa
 }
 
 define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_commute(<vscale x 1 x bfloat> %va, bfloat %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv1bf16_commute:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmv.x.h a1, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmv.v.x v9, a1
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfadd.vv v9, v8, v10, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vf_nxv1bf16_commute:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fmv.x.h a1, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vmv.v.x v9, a1
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v8, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT:    vfadd.vv v9, v8, v10, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv1bf16_commute:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v9, a1
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v9, v8, v10, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1bf16_commute:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    fmv.x.h a1, fa0
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT:    vmv.v.x v9, a1
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v9, v8, v10, v0.t
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0
   %vb = shufflevector <vscale x 1 x bfloat> %elt.head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer
   %v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %vb, <vscale x 1 x bfloat> %va, <vscale x 1 x i1> %m, i32 %evl)
@@ -83,18 +182,44 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_commute(<vscale x 1 x bfloat> %v
 }
 
 define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_unmasked(<vscale x 1 x bfloat> %va, bfloat %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv1bf16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmv.x.h a1, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmv.v.x v9, a1
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfadd.vv v9, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vf_nxv1bf16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fmv.x.h a1, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vmv.v.x v9, a1
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v8, v9
+; ZVFH-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT:    vfadd.vv v9, v10, v8
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv1bf16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v9, a1
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v8, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v9, v10, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1bf16_unmasked:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    fmv.x.h a1, fa0
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT:    vmv.v.x v9, a1
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v8, v9
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v9, v10, v8
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0
   %vb = shufflevector <vscale x 1 x bfloat> %elt.head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer
   %v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %va, <vscale x 1 x bfloat> %vb, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -102,18 +227,44 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_unmasked(<vscale x 1 x bfloat> %
 }
 
 define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_unmasked_commute(<vscale x 1 x bfloat> %va, bfloat %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv1bf16_unmasked_commute:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmv.x.h a1, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vmv.v.x v9, a1
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vfadd.vv v9, v8, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vf_nxv1bf16_unmasked_commute:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fmv.x.h a1, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vmv.v.x v9, a1
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v8, v9
+; ZVFH-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFH-NEXT:    vfadd.vv v9, v8, v10
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv1bf16_unmasked_commute:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v9, a1
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v8, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v9, v8, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1bf16_unmasked_commute:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    fmv.x.h a1, fa0
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT:    vmv.v.x v9, a1
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v8, v9
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v9, v8, v10
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x bfloat> poison, bfloat %b, i32 0
   %vb = shufflevector <vscale x 1 x bfloat> %elt.head, <vscale x 1 x bfloat> poison, <vscale x 1 x i32> zeroinitializer
   %v = call <vscale x 1 x bfloat> @llvm.vp.fadd.nxv1bf16(<vscale x 1 x bfloat> %vb, <vscale x 1 x bfloat> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
@@ -123,48 +274,118 @@ define <vscale x 1 x bfloat> @vfadd_vf_nxv1bf16_unmasked_commute(<vscale x 1 x
 declare <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat>, <vscale x 2 x bfloat>, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x bfloat> @vfadd_vv_nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv2bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v9, v0.t
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfadd.vv v9, v9, v10, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vv_nxv2bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v10, v9, v0.t
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v9, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT:    vfadd.vv v9, v9, v10, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv2bf16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v10, v9, v0.t
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v9, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v9, v9, v10, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv2bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v9, v0.t
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v9, v8, v0.t
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v9, v9, v10, v0.t
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT:    ret
   %v = call <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %b, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x bfloat> %v
 }
 
 define <vscale x 2 x bfloat> @vfadd_vv_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv2bf16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v9
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v9, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfadd.vv v9, v9, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vv_nxv2bf16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v10, v9
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT:    vfadd.vv v9, v9, v10
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv2bf16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v10, v9
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v9, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v9, v9, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv2bf16_unmasked:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v9, v9, v10
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT:    ret
   %v = call <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %b, <vscale x 2 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 2 x bfloat> %v
 }
 
 define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16(<vscale x 2 x bfloat> %va, bfloat %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv2bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmv.x.h a1, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmv.v.x v9, a1
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfadd.vv v9, v10, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vf_nxv2bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fmv.x.h a1, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vmv.v.x v9, a1
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v8, v9, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT:    vfadd.vv v9, v10, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv2bf16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v9, a1
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v10, v8, v0.t
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v8, v9, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v9, v10, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v9, v0.t
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv2bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    fmv.x.h a1, fa0
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT:    vmv.v.x v9, a1
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v9, v10, v8, v0.t
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x bfloat> poison, bfloat %b, i32 0
   %vb = shufflevector <vscale x 2 x bfloat> %elt.head, <vscale x 2 x bfloat> poison, <vscale x 2 x i32> zeroinitializer
   %v = call <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> %m, i32 %evl)
@@ -172,18 +393,44 @@ define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16(<vscale x 2 x bfloat> %va, bfloa
 }
 
 define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16_unmasked(<vscale x 2 x bfloat> %va, bfloat %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv2bf16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    fmv.x.h a1, fa0
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vmv.v.x v9, a1
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v8
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v8, v9
-; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vfadd.vv v9, v10, v8
-; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v9
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vf_nxv2bf16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fmv.x.h a1, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vmv.v.x v9, a1
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v8, v9
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFH-NEXT:    vfadd.vv v9, v10, v8
+; ZVFH-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v9
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vf_nxv2bf16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v9, a1
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v10, v8
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v8, v9
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v9, v10, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v9
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv2bf16_unmasked:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    fmv.x.h a1, fa0
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT:    vmv.v.x v9, a1
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v8, v9
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v9, v10, v8
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT:    ret
   %elt.head = insertelement <vscale x 2 x bfloat> poison, bfloat %b, i32 0
   %vb = shufflevector <vscale x 2 x bfloat> %elt.head, <vscale x 2 x bfloat> poison, <vscale x 2 x i32> zeroinitializer
   %v = call <vscale x 2 x bfloat> @llvm.vp.fadd.nxv2bf16(<vscale x 2 x bfloat> %va, <vscale x 2 x bfloat> %vb, <vscale x 2 x i1> splat (i1 true), i32 %evl)
@@ -193,48 +440,118 @@ define <vscale x 2 x bfloat> @vfadd_vf_nxv2bf16_unmasked(<vscale x 2 x bfloat> %
 declare <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat>, <vscale x 4 x bfloat>, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x bfloat> @vfadd_vv_nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv4bf16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v9, v0.t
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vfadd.vv v10, v12, v10, v0.t
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vv_nxv4bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v10, v9, v0.t
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v12, v8, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFH-NEXT:    vfadd.vv v10, v12, v10, v0.t
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv4bf16:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v10, v9, v0.t
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v12, v8, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v10, v12, v10, v0.t
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v10, v0.t
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv4bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, m1, ta, ma
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v9, v0.t
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v12, v8, v0.t
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v10, v12, v10, v0.t
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v10, v0.t
+; ZVFBFA-NEXT:    ret
   %v = call <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %b, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x bfloat> %v
 }
 
 define <vscale x 4 x bfloat> @vfadd_vv_nxv4bf16_unmasked(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %b, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vv_nxv4bf16_unmasked:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v10, v9
-; CHECK-NEXT:    vfwcvtbf16.f.f.v v12, v8
-; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vfadd.vv v10, v12, v10
-; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vfncvtbf16.f.f.w v8, v10
-; CHECK-NEXT:    ret
+; ZVFH-LABEL: vfadd_vv_nxv4bf16_unmasked:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v10, v9
+; ZVFH-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; ZVFH-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFH-NEXT:    vfadd.vv v10, v12, v10
+; ZVFH-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFH-NEXT:    vfncvtbf16.f.f.w v8, v10
+; ZVFH-NEXT:    ret
+;
+; ZVFHMIN-LABEL: vfadd_vv_nxv4bf16_unmasked:
+; ZVFHMIN:       # %bb.0:
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v10, v9
+; ZVFHMIN-NEXT:    vfwcvtbf16.f.f.v v12, v8
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFHMIN-NEXT:    vfadd.vv v10, v12, v10
+; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vfncvtbf16.f.f.w v8, v10
+; ZVFHMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv4bf16_unmasked:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, m1, ta, ma
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT:    vfwcvt.f.f.v v12, v8
+; ZVFBFA-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT:    vfadd.vv v10, v12, v10
+; ZVFBFA-NEXT:    vsetvli zero, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT:    vfncvt.f.f.w v8, v10
+; ZVFBFA-NEXT:    ret
   %v = call <vscale x 4 x bfloat> @llvm.vp.fadd.nxv4bf16(<vscale x 4 x bfloat> %va, <vscale x 4 x bfloat> %b, <vscale x 4 x i1> splat (i1 true), i32 %evl)
   ret <vscale x 4 x bfloat> %v
 }
 
 define <vscale x 4 x bfloat> @vfadd_vf_nxv4bf16(<vscale x 4 x bfloat> %va, bfloat %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vfadd_vf_nxv4bf16:
-; CHECK:       # %bb.0:
%bb.0: -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmv.v.x v12, a1 -; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma -; CHECK-NEXT: vfadd.vv v10, v10, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv4bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vmv.v.x v12, a1 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v12, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFH-NEXT: vfadd.vv v10, v10, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v10, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv4bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v12, a1 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v12, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v10, v10, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v10, v0.t +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv4bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; ZVFBFA-NEXT: vmv.v.x v12, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v12, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v10, v10, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m1, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10, v0.t +; ZVFBFA-NEXT: ret %elt.head = insertelement poison, bfloat %b, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer %v = call @llvm.vp.fadd.nxv4bf16( %va, %vb, %m, i32 %evl) @@ -242,18 +559,44 @@ define @vfadd_vf_nxv4bf16( %va, bfloa } define @vfadd_vf_nxv4bf16_unmasked( %va, bfloat %b, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vf_nxv4bf16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma -; CHECK-NEXT: vmv.v.x v12, a1 -; CHECK-NEXT: vfwcvtbf16.f.f.v v10, v8 -; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v12 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma -; CHECK-NEXT: vfadd.vv v10, v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v10 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv4bf16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFH-NEXT: vmv.v.x v12, a1 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v10, v8 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v12 +; ZVFH-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFH-NEXT: vfadd.vv v10, v10, v8 +; ZVFH-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v10 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv4bf16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v12, a1 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v10, v8 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v12 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v10, v10, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v10 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: 
vfadd_vf_nxv4bf16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m1, ta, ma +; ZVFBFA-NEXT: vmv.v.x v12, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8 +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v12 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma +; ZVFBFA-NEXT: vfadd.vv v10, v10, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m1, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10 +; ZVFBFA-NEXT: ret %elt.head = insertelement poison, bfloat %b, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer %v = call @llvm.vp.fadd.nxv4bf16( %va, %vb, splat (i1 true), i32 %evl) @@ -263,48 +606,118 @@ define @vfadd_vf_nxv4bf16_unmasked( % declare @llvm.vp.fadd.nxv8bf16(, , , i32) define @vfadd_vv_nxv8bf16( %va, %b, %m, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vv_nxv8bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v10, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; CHECK-NEXT: vfadd.vv v12, v16, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv8bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v10, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFH-NEXT: vfadd.vv v12, v16, v12, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv8bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v10, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v12, v16, v12, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv8bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v10, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFBFA-NEXT: vfadd.vv v12, v16, v12, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12, v0.t +; ZVFBFA-NEXT: ret %v = call @llvm.vp.fadd.nxv8bf16( %va, %b, %m, i32 %evl) ret %v } define @vfadd_vv_nxv8bf16_unmasked( %va, %b, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vv_nxv8bf16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v10 -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; CHECK-NEXT: vfadd.vv v12, v16, v12 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv8bf16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v10 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8 +; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFH-NEXT: vfadd.vv v12, v16, v12 +; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv8bf16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v10 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8 +; 
ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v12, v16, v12 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv8bf16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v10 +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFBFA-NEXT: vfadd.vv v12, v16, v12 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12 +; ZVFBFA-NEXT: ret %v = call @llvm.vp.fadd.nxv8bf16( %va, %b, splat (i1 true), i32 %evl) ret %v } define @vfadd_vf_nxv8bf16( %va, bfloat %b, %m, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vf_nxv8bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmv.v.x v16, a1 -; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; CHECK-NEXT: vfadd.vv v12, v12, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv8bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vmv.v.x v16, a1 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v16, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFH-NEXT: vfadd.vv v12, v12, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv8bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v16, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v12, v12, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12, v0.t +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv8bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vmv.v.x v16, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v16, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFBFA-NEXT: vfadd.vv v12, v12, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12, v0.t +; ZVFBFA-NEXT: ret %elt.head = insertelement poison, bfloat %b, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer %v = call @llvm.vp.fadd.nxv8bf16( %va, %vb, %m, i32 %evl) @@ -312,18 +725,44 @@ define @vfadd_vf_nxv8bf16( %va, bfloa } define @vfadd_vf_nxv8bf16_unmasked( %va, bfloat %b, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vf_nxv8bf16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma -; CHECK-NEXT: vmv.v.x v16, a1 -; CHECK-NEXT: vfwcvtbf16.f.f.v v12, v8 -; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v16 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; CHECK-NEXT: vfadd.vv v12, v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v12 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv8bf16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFH-NEXT: vmv.v.x 
v16, a1 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v12, v8 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v16 +; ZVFH-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFH-NEXT: vfadd.vv v12, v12, v8 +; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v12 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv8bf16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v12, v8 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v16 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v12, v12, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v12 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv8bf16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vmv.v.x v16, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8 +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v16 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma +; ZVFBFA-NEXT: vfadd.vv v12, v12, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m2, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12 +; ZVFBFA-NEXT: ret %elt.head = insertelement poison, bfloat %b, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer %v = call @llvm.vp.fadd.nxv8bf16( %va, %vb, splat (i1 true), i32 %evl) @@ -333,48 +772,118 @@ define @vfadd_vf_nxv8bf16_unmasked( % declare @llvm.vp.fadd.nxv16bf16(, , , i32) define @vfadd_vv_nxv16bf16( %va, %b, %m, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vv_nxv16bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v24, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv16bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v24, v16, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv16bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv16bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t +; ZVFBFA-NEXT: ret %v = call @llvm.vp.fadd.nxv16bf16( %va, %b, %m, i32 %evl) ret %v } define @vfadd_vv_nxv16bf16_unmasked( %va, %b, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vv_nxv16bf16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12 -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8 -; CHECK-NEXT: 
vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v24, v16 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv16bf16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v8 +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v24, v16 +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv16bf16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv16bf16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12 +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v24, v16 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16 +; ZVFBFA-NEXT: ret %v = call @llvm.vp.fadd.nxv16bf16( %va, %b, splat (i1 true), i32 %evl) ret %v } define @vfadd_vf_nxv16bf16( %va, bfloat %b, %m, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vf_nxv16bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmv.v.x v24, a1 -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v16, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv16bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vmv.v.x v24, a1 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v24, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v16, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv16bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v24, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v16, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv16bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vmv.v.x v24, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v24, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v16, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t +; ZVFBFA-NEXT: ret %elt.head = insertelement poison, bfloat %b, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer %v = call 
@llvm.vp.fadd.nxv16bf16( %va, %vb, %m, i32 %evl) @@ -382,18 +891,44 @@ define @vfadd_vf_nxv16bf16( %va, bf } define @vfadd_vf_nxv16bf16_unmasked( %va, bfloat %b, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vf_nxv16bf16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vmv.v.x v24, a1 -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 -; CHECK-NEXT: vfwcvtbf16.f.f.v v8, v24 -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv16bf16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vmv.v.x v24, a1 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v8, v24 +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v16, v8 +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv16bf16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v8, v24 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v16, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv16bf16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vmv.v.x v24, a1 +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v24 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v16, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16 +; ZVFBFA-NEXT: ret %elt.head = insertelement poison, bfloat %b, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer %v = call @llvm.vp.fadd.nxv16bf16( %va, %vb, splat (i1 true), i32 %evl) @@ -403,173 +938,493 @@ define @vfadd_vf_nxv16bf16_unmasked( @llvm.vp.fadd.nxv32bf16(, , , i32) define @vfadd_vv_nxv32bf16( %va, %b, %m, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vv_nxv32bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb -; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma -; CHECK-NEXT: vmv1r.v v7, v0 -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: slli a1, a2, 1 -; CHECK-NEXT: srli a2, a2, 2 -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vslidedown.vx v0, v0, a2 -; CHECK-NEXT: sltu a2, a0, a3 -; CHECK-NEXT: addi a2, a2, -1 -; CHECK-NEXT: and a2, a2, a3 -; CHECK-NEXT: addi a3, sp, 16 -; CHECK-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v16, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB22_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 
-; CHECK-NEXT: .LBB22_2: -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v24, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v24, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add sp, sp, a0 -; CHECK-NEXT: .cfi_def_cfa sp, 16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: .cfi_def_cfa_offset 0 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv32bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: addi sp, sp, -16 +; ZVFH-NEXT: .cfi_def_cfa_offset 16 +; ZVFH-NEXT: csrr a1, vlenb +; ZVFH-NEXT: slli a1, a1, 3 +; ZVFH-NEXT: sub sp, sp, a1 +; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFH-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; ZVFH-NEXT: vmv1r.v v7, v0 +; ZVFH-NEXT: csrr a2, vlenb +; ZVFH-NEXT: slli a1, a2, 1 +; ZVFH-NEXT: srli a2, a2, 2 +; ZVFH-NEXT: sub a3, a0, a1 +; ZVFH-NEXT: vslidedown.vx v0, v0, a2 +; ZVFH-NEXT: sltu a2, a0, a3 +; ZVFH-NEXT: addi a2, a2, -1 +; ZVFH-NEXT: and a2, a2, a3 +; ZVFH-NEXT: addi a3, sp, 16 +; ZVFH-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; ZVFH-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t +; ZVFH-NEXT: bltu a0, a1, .LBB22_2 +; ZVFH-NEXT: # %bb.1: +; ZVFH-NEXT: mv a0, a1 +; ZVFH-NEXT: .LBB22_2: +; ZVFH-NEXT: vmv1r.v v0, v7 +; ZVFH-NEXT: addi a1, sp, 16 +; ZVFH-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v24, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v24, v16, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t +; ZVFH-NEXT: csrr a0, vlenb +; ZVFH-NEXT: slli a0, a0, 3 +; ZVFH-NEXT: add sp, sp, a0 +; ZVFH-NEXT: .cfi_def_cfa sp, 16 +; ZVFH-NEXT: addi sp, sp, 16 +; ZVFH-NEXT: .cfi_def_cfa_offset 0 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv32bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v7, v0 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2 +; ZVFHMIN-NEXT: sltu a2, a0, a3 +; ZVFHMIN-NEXT: addi a2, a2, -1 +; ZVFHMIN-NEXT: and a2, a2, a3 +; ZVFHMIN-NEXT: addi a3, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, 
e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t +; ZVFHMIN-NEXT: bltu a0, a1, .LBB22_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB22_2: +; ZVFHMIN-NEXT: vmv1r.v v0, v7 +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v24, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v8, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv32bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: addi sp, sp, -16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 16 +; ZVFBFA-NEXT: csrr a1, vlenb +; ZVFBFA-NEXT: slli a1, a1, 3 +; ZVFBFA-NEXT: sub sp, sp, a1 +; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFBFA-NEXT: vsetvli a1, zero, e8, mf2, ta, ma +; ZVFBFA-NEXT: vmv1r.v v7, v0 +; ZVFBFA-NEXT: csrr a2, vlenb +; ZVFBFA-NEXT: slli a1, a2, 1 +; ZVFBFA-NEXT: srli a2, a2, 2 +; ZVFBFA-NEXT: sub a3, a0, a1 +; ZVFBFA-NEXT: vslidedown.vx v0, v0, a2 +; ZVFBFA-NEXT: sltu a2, a0, a3 +; ZVFBFA-NEXT: addi a2, a2, -1 +; ZVFBFA-NEXT: and a2, a2, a3 +; ZVFBFA-NEXT: addi a3, sp, 16 +; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; ZVFBFA-NEXT: vsetvli zero, a2, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t +; ZVFBFA-NEXT: bltu a0, a1, .LBB22_2 +; ZVFBFA-NEXT: # %bb.1: +; ZVFBFA-NEXT: mv a0, a1 +; ZVFBFA-NEXT: .LBB22_2: +; ZVFBFA-NEXT: vmv1r.v v0, v7 +; ZVFBFA-NEXT: addi a1, sp, 16 +; ZVFBFA-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v24, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t +; ZVFBFA-NEXT: csrr a0, vlenb +; ZVFBFA-NEXT: slli a0, a0, 3 +; ZVFBFA-NEXT: add sp, sp, a0 +; ZVFBFA-NEXT: .cfi_def_cfa sp, 16 +; ZVFBFA-NEXT: addi sp, sp, 16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 0 +; ZVFBFA-NEXT: ret %v = call @llvm.vp.fadd.nxv32bf16( %va, %b, %m, i32 %evl) ret %v } define @vfadd_vv_nxv32bf16_unmasked( %va, %b, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vv_nxv32bf16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma -; CHECK-NEXT: vmset.m v24 -; CHECK-NEXT: slli a1, a2, 
1 -; CHECK-NEXT: srli a2, a2, 2 -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma -; CHECK-NEXT: vslidedown.vx v0, v24, a2 -; CHECK-NEXT: sltu a2, a0, a3 -; CHECK-NEXT: addi a2, a2, -1 -; CHECK-NEXT: and a2, a2, a3 -; CHECK-NEXT: addi a3, sp, 16 -; CHECK-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v16, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB23_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB23_2: -; CHECK-NEXT: addi a1, sp, 16 -; CHECK-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v24 -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v24, v16 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add sp, sp, a0 -; CHECK-NEXT: .cfi_def_cfa sp, 16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: .cfi_def_cfa_offset 0 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vv_nxv32bf16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: addi sp, sp, -16 +; ZVFH-NEXT: .cfi_def_cfa_offset 16 +; ZVFH-NEXT: csrr a1, vlenb +; ZVFH-NEXT: slli a1, a1, 3 +; ZVFH-NEXT: sub sp, sp, a1 +; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFH-NEXT: csrr a2, vlenb +; ZVFH-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; ZVFH-NEXT: vmset.m v24 +; ZVFH-NEXT: slli a1, a2, 1 +; ZVFH-NEXT: srli a2, a2, 2 +; ZVFH-NEXT: sub a3, a0, a1 +; ZVFH-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFH-NEXT: vslidedown.vx v0, v24, a2 +; ZVFH-NEXT: sltu a2, a0, a3 +; ZVFH-NEXT: addi a2, a2, -1 +; ZVFH-NEXT: and a2, a2, a3 +; ZVFH-NEXT: addi a3, sp, 16 +; ZVFH-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; ZVFH-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t +; ZVFH-NEXT: bltu a0, a1, .LBB23_2 +; ZVFH-NEXT: # %bb.1: +; ZVFH-NEXT: mv a0, a1 +; ZVFH-NEXT: .LBB23_2: +; ZVFH-NEXT: addi a1, sp, 16 +; ZVFH-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v24 +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v8 +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v24, v16 +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16 +; ZVFH-NEXT: csrr a0, vlenb +; ZVFH-NEXT: slli a0, a0, 3 +; ZVFH-NEXT: add sp, sp, a0 +; ZVFH-NEXT: .cfi_def_cfa sp, 16 +; ZVFH-NEXT: addi sp, sp, 16 +; ZVFH-NEXT: .cfi_def_cfa_offset 0 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vv_nxv32bf16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 
0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; ZVFHMIN-NEXT: vmset.m v24 +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2 +; ZVFHMIN-NEXT: sltu a2, a0, a3 +; ZVFHMIN-NEXT: addi a2, a2, -1 +; ZVFHMIN-NEXT: and a2, a2, a3 +; ZVFHMIN-NEXT: addi a3, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t +; ZVFHMIN-NEXT: bltu a0, a1, .LBB23_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB23_2: +; ZVFHMIN-NEXT: addi a1, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v24 +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v8 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vv_nxv32bf16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: addi sp, sp, -16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 16 +; ZVFBFA-NEXT: csrr a1, vlenb +; ZVFBFA-NEXT: slli a1, a1, 3 +; ZVFBFA-NEXT: sub sp, sp, a1 +; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFBFA-NEXT: csrr a2, vlenb +; ZVFBFA-NEXT: vsetvli a1, zero, e8, m4, ta, ma +; ZVFBFA-NEXT: vmset.m v24 +; ZVFBFA-NEXT: slli a1, a2, 1 +; ZVFBFA-NEXT: srli a2, a2, 2 +; ZVFBFA-NEXT: sub a3, a0, a1 +; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFBFA-NEXT: vslidedown.vx v0, v24, a2 +; ZVFBFA-NEXT: sltu a2, a0, a3 +; ZVFBFA-NEXT: addi a2, a2, -1 +; ZVFBFA-NEXT: and a2, a2, a3 +; ZVFBFA-NEXT: addi a3, sp, 16 +; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; ZVFBFA-NEXT: vsetvli zero, a2, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t +; ZVFBFA-NEXT: bltu a0, a1, .LBB23_2 +; ZVFBFA-NEXT: # %bb.1: +; ZVFBFA-NEXT: mv a0, a1 +; ZVFBFA-NEXT: .LBB23_2: +; ZVFBFA-NEXT: addi a1, sp, 16 +; ZVFBFA-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v24 +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v24, v16 +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16 +; ZVFBFA-NEXT: csrr a0, vlenb +; ZVFBFA-NEXT: slli a0, a0, 3 +; ZVFBFA-NEXT: add sp, sp, a0 +; ZVFBFA-NEXT: .cfi_def_cfa sp, 16 +; 
ZVFBFA-NEXT: addi sp, sp, 16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 0 +; ZVFBFA-NEXT: ret %v = call @llvm.vp.fadd.nxv32bf16( %va, %b, splat (i1 true), i32 %evl) ret %v } define @vfadd_vf_nxv32bf16( %va, bfloat %b, %m, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vf_nxv32bf16: -; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 4 -; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb -; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma -; CHECK-NEXT: vmv1r.v v7, v0 -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: vmv.v.x v24, a1 -; CHECK-NEXT: slli a1, a2, 1 -; CHECK-NEXT: srli a2, a2, 2 -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma -; CHECK-NEXT: vslidedown.vx v0, v0, a2 -; CHECK-NEXT: sltu a2, a0, a3 -; CHECK-NEXT: addi a2, a2, -1 -; CHECK-NEXT: and a2, a2, a3 -; CHECK-NEXT: csrr a3, vlenb -; CHECK-NEXT: slli a3, a3, 3 -; CHECK-NEXT: add a3, sp, a3 -; CHECK-NEXT: addi a3, a3, 16 -; CHECK-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v28, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v24, v16, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB24_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB24_2: -; CHECK-NEXT: vmv1r.v v0, v7 -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add a0, sp, a0 -; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v16, v0.t -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v16, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 4 -; CHECK-NEXT: add sp, sp, a0 -; CHECK-NEXT: .cfi_def_cfa sp, 16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: .cfi_def_cfa_offset 0 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv32bf16: +; ZVFH: # %bb.0: +; ZVFH-NEXT: addi sp, sp, -16 +; ZVFH-NEXT: .cfi_def_cfa_offset 16 +; ZVFH-NEXT: csrr a1, vlenb +; ZVFH-NEXT: slli a1, a1, 4 +; ZVFH-NEXT: sub sp, sp, a1 +; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; ZVFH-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; ZVFH-NEXT: vmv1r.v v7, v0 +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: csrr a2, vlenb +; ZVFH-NEXT: vmv.v.x v24, a1 +; ZVFH-NEXT: slli a1, a2, 1 +; ZVFH-NEXT: srli a2, a2, 2 +; ZVFH-NEXT: sub a3, a0, a1 +; ZVFH-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFH-NEXT: vslidedown.vx v0, v0, a2 +; ZVFH-NEXT: sltu a2, a0, a3 +; ZVFH-NEXT: addi a2, a2, -1 +; ZVFH-NEXT: and a2, a2, a3 +; ZVFH-NEXT: csrr a3, vlenb +; ZVFH-NEXT: slli a3, a3, 3 +; ZVFH-NEXT: add a3, sp, a3 +; ZVFH-NEXT: addi a3, a3, 16 +; ZVFH-NEXT: vs8r.v v24, (a3) # vscale x 
64-byte Folded Spill +; ZVFH-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v28, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v12, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v24, v16, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t +; ZVFH-NEXT: bltu a0, a1, .LBB24_2 +; ZVFH-NEXT: # %bb.1: +; ZVFH-NEXT: mv a0, a1 +; ZVFH-NEXT: .LBB24_2: +; ZVFH-NEXT: vmv1r.v v0, v7 +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t +; ZVFH-NEXT: addi a0, sp, 16 +; ZVFH-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill +; ZVFH-NEXT: csrr a0, vlenb +; ZVFH-NEXT: slli a0, a0, 3 +; ZVFH-NEXT: add a0, sp, a0 +; ZVFH-NEXT: addi a0, a0, 16 +; ZVFH-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v16, v0.t +; ZVFH-NEXT: addi a0, sp, 16 +; ZVFH-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t +; ZVFH-NEXT: csrr a0, vlenb +; ZVFH-NEXT: slli a0, a0, 4 +; ZVFH-NEXT: add sp, sp, a0 +; ZVFH-NEXT: .cfi_def_cfa sp, 16 +; ZVFH-NEXT: addi sp, sp, 16 +; ZVFH-NEXT: .cfi_def_cfa_offset 0 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv32bf16: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 4 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmv1r.v v7, v0 +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: vmv.v.x v24, a1 +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2 +; ZVFHMIN-NEXT: sltu a2, a0, a3 +; ZVFHMIN-NEXT: addi a2, a2, -1 +; ZVFHMIN-NEXT: and a2, a2, a3 +; ZVFHMIN-NEXT: csrr a3, vlenb +; ZVFHMIN-NEXT: slli a3, a3, 3 +; ZVFHMIN-NEXT: add a3, sp, a3 +; ZVFHMIN-NEXT: addi a3, a3, 16 +; ZVFHMIN-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v28, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v12, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t +; ZVFHMIN-NEXT: bltu a0, a1, .LBB24_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB24_2: +; ZVFHMIN-NEXT: vmv1r.v v0, v7 +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8, v0.t +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add a0, sp, a0 +; ZVFHMIN-NEXT: addi a0, a0, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v16, v0.t +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v16, 
v24, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16, v0.t +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 4 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv32bf16: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: addi sp, sp, -16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 16 +; ZVFBFA-NEXT: csrr a1, vlenb +; ZVFBFA-NEXT: slli a1, a1, 4 +; ZVFBFA-NEXT: sub sp, sp, a1 +; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; ZVFBFA-NEXT: vsetvli a1, zero, e16, m8, ta, ma +; ZVFBFA-NEXT: vmv1r.v v7, v0 +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: csrr a2, vlenb +; ZVFBFA-NEXT: vmv.v.x v24, a1 +; ZVFBFA-NEXT: slli a1, a2, 1 +; ZVFBFA-NEXT: srli a2, a2, 2 +; ZVFBFA-NEXT: sub a3, a0, a1 +; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFBFA-NEXT: vslidedown.vx v0, v0, a2 +; ZVFBFA-NEXT: sltu a2, a0, a3 +; ZVFBFA-NEXT: addi a2, a2, -1 +; ZVFBFA-NEXT: and a2, a2, a3 +; ZVFBFA-NEXT: csrr a3, vlenb +; ZVFBFA-NEXT: slli a3, a3, 3 +; ZVFBFA-NEXT: add a3, sp, a3 +; ZVFBFA-NEXT: addi a3, a3, 16 +; ZVFBFA-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill +; ZVFBFA-NEXT: vsetvli zero, a2, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v28, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v12, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t +; ZVFBFA-NEXT: bltu a0, a1, .LBB24_2 +; ZVFBFA-NEXT: # %bb.1: +; ZVFBFA-NEXT: mv a0, a1 +; ZVFBFA-NEXT: .LBB24_2: +; ZVFBFA-NEXT: vmv1r.v v0, v7 +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t +; ZVFBFA-NEXT: addi a0, sp, 16 +; ZVFBFA-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill +; ZVFBFA-NEXT: csrr a0, vlenb +; ZVFBFA-NEXT: slli a0, a0, 3 +; ZVFBFA-NEXT: add a0, sp, a0 +; ZVFBFA-NEXT: addi a0, a0, 16 +; ZVFBFA-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v16, v0.t +; ZVFBFA-NEXT: addi a0, sp, 16 +; ZVFBFA-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t +; ZVFBFA-NEXT: csrr a0, vlenb +; ZVFBFA-NEXT: slli a0, a0, 4 +; ZVFBFA-NEXT: add sp, sp, a0 +; ZVFBFA-NEXT: .cfi_def_cfa sp, 16 +; ZVFBFA-NEXT: addi sp, sp, 16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 0 +; ZVFBFA-NEXT: ret %elt.head = insertelement poison, bfloat %b, i32 0 %vb = shufflevector %elt.head, poison, zeroinitializer %v = call @llvm.vp.fadd.nxv32bf16( %va, %vb, %m, i32 %evl) @@ -577,56 +1432,158 @@ define @vfadd_vf_nxv32bf16( %va, bf } define @vfadd_vf_nxv32bf16_unmasked( %va, bfloat %b, i32 zeroext %evl) { -; CHECK-LABEL: vfadd_vf_nxv32bf16_unmasked: -; CHECK: # %bb.0: -; CHECK-NEXT: addi sp, sp, -16 -; CHECK-NEXT: .cfi_def_cfa_offset 16 -; CHECK-NEXT: csrr a1, vlenb -; CHECK-NEXT: slli a1, a1, 3 -; CHECK-NEXT: sub sp, sp, a1 -; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb -; CHECK-NEXT: fmv.x.h a1, fa0 -; CHECK-NEXT: csrr a2, vlenb -; CHECK-NEXT: vsetvli a3, zero, e16, m8, ta, ma -; 
CHECK-NEXT: vmset.m v24 -; CHECK-NEXT: vmv.v.x v16, a1 -; CHECK-NEXT: slli a1, a2, 1 -; CHECK-NEXT: srli a2, a2, 2 -; CHECK-NEXT: sub a3, a0, a1 -; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma -; CHECK-NEXT: vslidedown.vx v0, v24, a2 -; CHECK-NEXT: sltu a2, a0, a3 -; CHECK-NEXT: addi a2, a2, -1 -; CHECK-NEXT: and a2, a2, a3 -; CHECK-NEXT: addi a3, sp, 16 -; CHECK-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill -; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v16, v24, v0.t -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t -; CHECK-NEXT: bltu a0, a1, .LBB25_2 -; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a0, a1 -; CHECK-NEXT: .LBB25_2: -; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma -; CHECK-NEXT: vfwcvtbf16.f.f.v v16, v8 -; CHECK-NEXT: addi a0, sp, 16 -; CHECK-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload -; CHECK-NEXT: vfwcvtbf16.f.f.v v24, v0 -; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma -; CHECK-NEXT: vfadd.vv v16, v16, v24 -; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvtbf16.f.f.w v8, v16 -; CHECK-NEXT: csrr a0, vlenb -; CHECK-NEXT: slli a0, a0, 3 -; CHECK-NEXT: add sp, sp, a0 -; CHECK-NEXT: .cfi_def_cfa sp, 16 -; CHECK-NEXT: addi sp, sp, 16 -; CHECK-NEXT: .cfi_def_cfa_offset 0 -; CHECK-NEXT: ret +; ZVFH-LABEL: vfadd_vf_nxv32bf16_unmasked: +; ZVFH: # %bb.0: +; ZVFH-NEXT: addi sp, sp, -16 +; ZVFH-NEXT: .cfi_def_cfa_offset 16 +; ZVFH-NEXT: csrr a1, vlenb +; ZVFH-NEXT: slli a1, a1, 3 +; ZVFH-NEXT: sub sp, sp, a1 +; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFH-NEXT: fmv.x.h a1, fa0 +; ZVFH-NEXT: csrr a2, vlenb +; ZVFH-NEXT: vsetvli a3, zero, e16, m8, ta, ma +; ZVFH-NEXT: vmset.m v24 +; ZVFH-NEXT: vmv.v.x v16, a1 +; ZVFH-NEXT: slli a1, a2, 1 +; ZVFH-NEXT: srli a2, a2, 2 +; ZVFH-NEXT: sub a3, a0, a1 +; ZVFH-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFH-NEXT: vslidedown.vx v0, v24, a2 +; ZVFH-NEXT: sltu a2, a0, a3 +; ZVFH-NEXT: addi a2, a2, -1 +; ZVFH-NEXT: and a2, a2, a3 +; ZVFH-NEXT: addi a3, sp, 16 +; ZVFH-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; ZVFH-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t +; ZVFH-NEXT: bltu a0, a1, .LBB25_2 +; ZVFH-NEXT: # %bb.1: +; ZVFH-NEXT: mv a0, a1 +; ZVFH-NEXT: .LBB25_2: +; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFH-NEXT: vfwcvtbf16.f.f.v v16, v8 +; ZVFH-NEXT: addi a0, sp, 16 +; ZVFH-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload +; ZVFH-NEXT: vfwcvtbf16.f.f.v v24, v0 +; ZVFH-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFH-NEXT: vfadd.vv v16, v16, v24 +; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFH-NEXT: vfncvtbf16.f.f.w v8, v16 +; ZVFH-NEXT: csrr a0, vlenb +; ZVFH-NEXT: slli a0, a0, 3 +; ZVFH-NEXT: add sp, sp, a0 +; ZVFH-NEXT: .cfi_def_cfa sp, 16 +; ZVFH-NEXT: addi sp, sp, 16 +; ZVFH-NEXT: .cfi_def_cfa_offset 0 +; ZVFH-NEXT: ret +; +; ZVFHMIN-LABEL: vfadd_vf_nxv32bf16_unmasked: +; ZVFHMIN: # %bb.0: +; ZVFHMIN-NEXT: addi sp, sp, -16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16 +; 
ZVFHMIN-NEXT: csrr a1, vlenb +; ZVFHMIN-NEXT: slli a1, a1, 3 +; ZVFHMIN-NEXT: sub sp, sp, a1 +; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFHMIN-NEXT: fmv.x.h a1, fa0 +; ZVFHMIN-NEXT: csrr a2, vlenb +; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m8, ta, ma +; ZVFHMIN-NEXT: vmset.m v24 +; ZVFHMIN-NEXT: vmv.v.x v16, a1 +; ZVFHMIN-NEXT: slli a1, a2, 1 +; ZVFHMIN-NEXT: srli a2, a2, 2 +; ZVFHMIN-NEXT: sub a3, a0, a1 +; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2 +; ZVFHMIN-NEXT: sltu a2, a0, a3 +; ZVFHMIN-NEXT: addi a2, a2, -1 +; ZVFHMIN-NEXT: and a2, a2, a3 +; ZVFHMIN-NEXT: addi a3, sp, 16 +; ZVFHMIN-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; ZVFHMIN-NEXT: vsetvli zero, a2, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v20, v0.t +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v12, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v12, v16, v0.t +; ZVFHMIN-NEXT: bltu a0, a1, .LBB25_2 +; ZVFHMIN-NEXT: # %bb.1: +; ZVFHMIN-NEXT: mv a0, a1 +; ZVFHMIN-NEXT: .LBB25_2: +; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v16, v8 +; ZVFHMIN-NEXT: addi a0, sp, 16 +; ZVFHMIN-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload +; ZVFHMIN-NEXT: vfwcvtbf16.f.f.v v24, v0 +; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24 +; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma +; ZVFHMIN-NEXT: vfncvtbf16.f.f.w v8, v16 +; ZVFHMIN-NEXT: csrr a0, vlenb +; ZVFHMIN-NEXT: slli a0, a0, 3 +; ZVFHMIN-NEXT: add sp, sp, a0 +; ZVFHMIN-NEXT: .cfi_def_cfa sp, 16 +; ZVFHMIN-NEXT: addi sp, sp, 16 +; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0 +; ZVFHMIN-NEXT: ret +; +; ZVFBFA-LABEL: vfadd_vf_nxv32bf16_unmasked: +; ZVFBFA: # %bb.0: +; ZVFBFA-NEXT: addi sp, sp, -16 +; ZVFBFA-NEXT: .cfi_def_cfa_offset 16 +; ZVFBFA-NEXT: csrr a1, vlenb +; ZVFBFA-NEXT: slli a1, a1, 3 +; ZVFBFA-NEXT: sub sp, sp, a1 +; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb +; ZVFBFA-NEXT: fmv.x.h a1, fa0 +; ZVFBFA-NEXT: csrr a2, vlenb +; ZVFBFA-NEXT: vsetvli a3, zero, e16, m8, ta, ma +; ZVFBFA-NEXT: vmset.m v24 +; ZVFBFA-NEXT: vmv.v.x v16, a1 +; ZVFBFA-NEXT: slli a1, a2, 1 +; ZVFBFA-NEXT: srli a2, a2, 2 +; ZVFBFA-NEXT: sub a3, a0, a1 +; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma +; ZVFBFA-NEXT: vslidedown.vx v0, v24, a2 +; ZVFBFA-NEXT: sltu a2, a0, a3 +; ZVFBFA-NEXT: addi a2, a2, -1 +; ZVFBFA-NEXT: and a2, a2, a3 +; ZVFBFA-NEXT: addi a3, sp, 16 +; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill +; ZVFBFA-NEXT: vsetvli zero, a2, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma +; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t +; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t +; ZVFBFA-NEXT: bltu a0, a1, .LBB25_2 +; ZVFBFA-NEXT: # %bb.1: +; ZVFBFA-NEXT: mv a0, a1 +; ZVFBFA-NEXT: .LBB25_2: +; ZVFBFA-NEXT: vsetvli zero, a0, e16alt, m4, ta, ma +; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8 +; ZVFBFA-NEXT: addi a0, sp, 16 +; ZVFBFA-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload +; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v0 +; ZVFBFA-NEXT: vsetvli zero, zero, e32, 
m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24
+; ZVFBFA-NEXT: vsetvli zero, zero, e16alt, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
 %elt.head = insertelement poison, bfloat %b, i32 0
 %vb = shufflevector %elt.head, poison, zeroinitializer
 %v = call @llvm.vp.fadd.nxv32bf16( %va, %vb, splat (i1 true), i32 %evl)
@@ -651,6 +1608,17 @@ define @vfadd_vv_nxv1f16( %va,
 @llvm.vp.fadd.nxv1f16( %va, %b, %m, i32 %evl)
 ret %v
 }
@@ -672,6 +1640,17 @@ define @vfadd_vv_nxv1f16_unmasked( %va, <
 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv1f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
 %v = call @llvm.vp.fadd.nxv1f16( %va, %b, splat (i1 true), i32 %evl)
 ret %v
 }
@@ -695,6 +1674,19 @@ define @vfadd_vf_nxv1f16( %va, half %b, <
 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9, v0.t
 ; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v10, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
 %vb = shufflevector %elt.head, poison, zeroinitializer
 %v = call @llvm.vp.fadd.nxv1f16( %va, %vb, %m, i32 %evl)
@@ -720,6 +1712,19 @@ define @vfadd_vf_nxv1f16_commute( %va, ha
 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9, v0.t
 ; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1f16_commute:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v8, v10, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
 %vb = shufflevector %elt.head, poison, zeroinitializer
 %v = call @llvm.vp.fadd.nxv1f16( %vb, %va, %m, i32 %evl)
@@ -745,6 +1750,19 @@ define @vfadd_vf_nxv1f16_unmasked( %va, h
 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv1f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v10, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
 %vb = shufflevector %elt.head, poison, zeroinitializer
 %v = call @llvm.vp.fadd.nxv1f16( %va, %vb, splat (i1 true), i32 %evl)
@@ -770,6 +1788,19 @@ define @vfadd_vf_nxv1f16_unmasked_commute(
 poison, half %b, i32 0
 %vb = shufflevector %elt.head, poison, zeroinitializer
 %v = call @llvm.vp.fadd.nxv1f16( %vb, %va, splat (i1 true), i32 %evl)
@@ -795,6 +1826,17 @@ define @vfadd_vv_nxv2f16( %va,
 @llvm.vp.fadd.nxv2f16( %va, %b, %m, i32 %evl)
 ret %v
 }
@@ -816,6 +1858,17 @@ define @vfadd_vv_nxv2f16_unmasked( %va, <
 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv2f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT: vfwcvt.f.f.v v9, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v9, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
 %v = call @llvm.vp.fadd.nxv2f16( %va, %b, splat (i1 true), i32 %evl)
 ret %v
 }
@@ -839,6 +1892,19 @@ define @vfadd_vf_nxv2f16( %va, half %b, <
 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9, v0.t
 ; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv2f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v10, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9, v0.t
+; ZVFBFA-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
 %vb = shufflevector %elt.head, poison, zeroinitializer
 %v = call @llvm.vp.fadd.nxv2f16( %va, %vb, %m, i32 %evl)
@@ -864,6 +1930,19 @@ define @vfadd_vf_nxv2f16_unmasked( %va, h
 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v9
 ; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv2f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v9, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v9
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v9, v10, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v9
+; ZVFBFA-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
 %vb = shufflevector %elt.head, poison, zeroinitializer
 %v = call @llvm.vp.fadd.nxv2f16( %va, %vb, splat (i1 true), i32 %evl)
@@ -889,6 +1968,17 @@ define @vfadd_vv_nxv4f16( %va,
 @llvm.vp.fadd.nxv4f16( %va, %b, %m, i32 %evl)
 ret %v
 }
@@ -910,6 +2000,17 @@ define @vfadd_vv_nxv4f16_unmasked( %va, <
 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv4f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v9
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v10, v12, v10
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10
+; ZVFBFA-NEXT: ret
 %v = call @llvm.vp.fadd.nxv4f16( %va, %b, splat (i1 true), i32 %evl)
 ret %v
 }
@@ -933,6 +2034,19 @@ define @vfadd_vf_nxv4f16( %va, half %b, <
 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10, v0.t
 ; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv4f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v12, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v10, v10, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10, v0.t
+; ZVFBFA-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
 %vb = shufflevector %elt.head, poison, zeroinitializer
 %v = call @llvm.vp.fadd.nxv4f16( %va, %vb, %m, i32 %evl)
@@ -958,6 +2072,19 @@ define @vfadd_vf_nxv4f16_unmasked( %va, h
 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v10
 ; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv4f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v12, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v10, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v12
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v10, v10, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v10
+; ZVFBFA-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
 %vb = shufflevector %elt.head, poison, zeroinitializer
 %v = call @llvm.vp.fadd.nxv4f16( %va, %vb, splat (i1 true), i32 %evl)
@@ -983,6 +2110,17 @@ define @vfadd_vv_nxv8f16( %va,
 @llvm.vp.fadd.nxv8f16( %va, %b, %m, i32 %evl)
 ret %v
 }
@@ -1004,6 +2142,17 @@ define @vfadd_vv_nxv8f16_unmasked( %va, <
 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv8f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v10
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v12, v16, v12
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12
+; ZVFBFA-NEXT: ret
 %v = call @llvm.vp.fadd.nxv8f16( %va, %b, splat (i1 true), i32 %evl)
 ret %v
 }
@@ -1027,6 +2176,19 @@ define @vfadd_vf_nxv8f16( %va, half %b, <
 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12, v0.t
 ; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv8f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v16, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v16, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v12, v12, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12, v0.t
+; ZVFBFA-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
 %vb = shufflevector %elt.head, poison, zeroinitializer
 %v = call @llvm.vp.fadd.nxv8f16( %va, %vb, %m, i32 %evl)
@@ -1052,6 +2214,19 @@ define @vfadd_vf_nxv8f16_unmasked( %va, h
 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v12
 ; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv8f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v16, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v12, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m4, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v12, v12, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v12
+; ZVFBFA-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
 %vb = shufflevector %elt.head, poison, zeroinitializer
 %v = call @llvm.vp.fadd.nxv8f16( %va, %vb, splat (i1 true), i32 %evl)
@@ -1077,6 +2252,17 @@ define @vfadd_vv_nxv16f16( %va,
 @llvm.vp.fadd.nxv16f16( %va, %b, %m, i32 %evl)
 ret %v
 }
@@ -1098,6 +2284,17 @@ define @vfadd_vv_nxv16f16_unmasked( %va
 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv16f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: ret
 %v = call @llvm.vp.fadd.nxv16f16( %va, %b, splat (i1 true), i32 %evl)
 ret %v
 }
@@ -1121,6 +2318,19 @@ define @vfadd_vf_nxv16f16( %va, half %b
 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16, v0.t
 ; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv16f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v24, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v8, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFBFA-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
 %vb = shufflevector %elt.head, poison, zeroinitializer
 %v = call @llvm.vp.fadd.nxv16f16( %va, %vb, %m, i32 %evl)
@@ -1146,6 +2356,19 @@ define @vfadd_vf_nxv16f16_unmasked( %va
 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
 ; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv16f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vmv.v.x v24, a1
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT: vfwcvt.f.f.v v8, v24
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
 %vb = shufflevector %elt.head, poison, zeroinitializer
 %v = call @llvm.vp.fadd.nxv16f16( %va, %vb, splat (i1 true), i32 %evl)
@@ -1209,6 +2432,55 @@ define @vfadd_vv_nxv32f16( %va,
 @llvm.vp.fadd.nxv32f16( %va, %b, %m, i32 %evl)
 ret %v
 }
@@ -1268,6 +2540,55 @@ define @vfadd_vv_nxv32f16_unmasked( %va
 ; ZVFHMIN-NEXT: addi sp, sp, 16
 ; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vv_nxv32f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a1, vlenb
+; ZVFBFA-NEXT: slli a1, a1, 3
+; ZVFBFA-NEXT: sub sp, sp, a1
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFBFA-NEXT: csrr a2, vlenb
+; ZVFBFA-NEXT: vsetvli a1, zero, e8, m4, ta, ma
+; ZVFBFA-NEXT: vmset.m v24
+; ZVFBFA-NEXT: slli a1, a2, 1
+; ZVFBFA-NEXT: srli a2, a2, 2
+; ZVFBFA-NEXT: sub a3, a0, a1
+; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFBFA-NEXT: vslidedown.vx v0, v24, a2
+; ZVFBFA-NEXT: sltu a2, a0, a3
+; ZVFBFA-NEXT: addi a2, a2, -1
+; ZVFBFA-NEXT: and a2, a2, a3
+; ZVFBFA-NEXT: addi a3, sp, 16
+; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFBFA-NEXT: bltu a0, a1, .LBB49_2
+; ZVFBFA-NEXT: # %bb.1:
+; ZVFBFA-NEXT: mv a0, a1
+; ZVFBFA-NEXT: .LBB49_2:
+; ZVFBFA-NEXT: addi a1, sp, 16
+; ZVFBFA-NEXT: vl8r.v v24, (a1) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
 %v = call @llvm.vp.fadd.nxv32f16( %va, %b, splat (i1 true), i32 %evl)
 ret %v
 }
@@ -1340,6 +2661,68 @@ define @vfadd_vf_nxv32f16( %va, half %b
 ; ZVFHMIN-NEXT: addi sp, sp, 16
 ; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv32f16:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a1, vlenb
+; ZVFBFA-NEXT: slli a1, a1, 4
+; ZVFBFA-NEXT: sub sp, sp, a1
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; ZVFBFA-NEXT: vsetvli a1, zero, e16, m8, ta, ma
+; ZVFBFA-NEXT: vmv1r.v v7, v0
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: csrr a2, vlenb
+; ZVFBFA-NEXT: vmv.v.x v24, a1
+; ZVFBFA-NEXT: slli a1, a2, 1
+; ZVFBFA-NEXT: srli a2, a2, 2
+; ZVFBFA-NEXT: sub a3, a0, a1
+; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFBFA-NEXT: vslidedown.vx v0, v0, a2
+; ZVFBFA-NEXT: sltu a2, a0, a3
+; ZVFBFA-NEXT: addi a2, a2, -1
+; ZVFBFA-NEXT: and a2, a2, a3
+; ZVFBFA-NEXT: csrr a3, vlenb
+; ZVFBFA-NEXT: slli a3, a3, 3
+; ZVFBFA-NEXT: add a3, sp, a3
+; ZVFBFA-NEXT: addi a3, a3, 16
+; ZVFBFA-NEXT: vs8r.v v24, (a3) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v28, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v24, v16, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFBFA-NEXT: bltu a0, a1, .LBB50_2
+; ZVFBFA-NEXT: # %bb.1:
+; ZVFBFA-NEXT: mv a0, a1
+; ZVFBFA-NEXT: .LBB50_2:
+; ZVFBFA-NEXT: vmv1r.v v0, v7
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8, v0.t
+; ZVFBFA-NEXT: addi a0, sp, 16
+; ZVFBFA-NEXT: vs8r.v v16, (a0) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add a0, sp, a0
+; ZVFBFA-NEXT: addi a0, a0, 16
+; ZVFBFA-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v16, v0.t
+; ZVFBFA-NEXT: addi a0, sp, 16
+; ZVFBFA-NEXT: vl8r.v v16, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16, v0.t
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 4
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
 %vb = shufflevector %elt.head, poison, zeroinitializer
 %v = call @llvm.vp.fadd.nxv32f16( %va, %vb, %m, i32 %evl)
@@ -1403,6 +2786,57 @@ define @vfadd_vf_nxv32f16_unmasked( %va
 ; ZVFHMIN-NEXT: addi sp, sp, 16
 ; ZVFHMIN-NEXT: .cfi_def_cfa_offset 0
 ; ZVFHMIN-NEXT: ret
+;
+; ZVFBFA-LABEL: vfadd_vf_nxv32f16_unmasked:
+; ZVFBFA: # %bb.0:
+; ZVFBFA-NEXT: addi sp, sp, -16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 16
+; ZVFBFA-NEXT: csrr a1, vlenb
+; ZVFBFA-NEXT: slli a1, a1, 3
+; ZVFBFA-NEXT: sub sp, sp, a1
+; ZVFBFA-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFBFA-NEXT: fmv.x.w a1, fa0
+; ZVFBFA-NEXT: csrr a2, vlenb
+; ZVFBFA-NEXT: vsetvli a3, zero, e16, m8, ta, ma
+; ZVFBFA-NEXT: vmset.m v24
+; ZVFBFA-NEXT: vmv.v.x v16, a1
+; ZVFBFA-NEXT: slli a1, a2, 1
+; ZVFBFA-NEXT: srli a2, a2, 2
+; ZVFBFA-NEXT: sub a3, a0, a1
+; ZVFBFA-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFBFA-NEXT: vslidedown.vx v0, v24, a2
+; ZVFBFA-NEXT: sltu a2, a0, a3
+; ZVFBFA-NEXT: addi a2, a2, -1
+; ZVFBFA-NEXT: and a2, a2, a3
+; ZVFBFA-NEXT: addi a3, sp, 16
+; ZVFBFA-NEXT: vs8r.v v16, (a3) # vscale x 64-byte Folded Spill
+; ZVFBFA-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v20, v0.t
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v12, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24, v0.t
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v12, v16, v0.t
+; ZVFBFA-NEXT: bltu a0, a1, .LBB51_2
+; ZVFBFA-NEXT: # %bb.1:
+; ZVFBFA-NEXT: mv a0, a1
+; ZVFBFA-NEXT: .LBB51_2:
+; ZVFBFA-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFBFA-NEXT: addi a0, sp, 16
+; ZVFBFA-NEXT: vl8r.v v0, (a0) # vscale x 64-byte Folded Reload
+; ZVFBFA-NEXT: vfwcvt.f.f.v v24, v0
+; ZVFBFA-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFBFA-NEXT: vfadd.vv v16, v16, v24
+; ZVFBFA-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFBFA-NEXT: vfncvt.f.f.w v8, v16
+; ZVFBFA-NEXT: csrr a0, vlenb
+; ZVFBFA-NEXT: slli a0, a0, 3
+; ZVFBFA-NEXT: add sp, sp, a0
+; ZVFBFA-NEXT: .cfi_def_cfa sp, 16
+; ZVFBFA-NEXT: addi sp, sp, 16
+; ZVFBFA-NEXT: .cfi_def_cfa_offset 0
+; ZVFBFA-NEXT: ret
 %elt.head = insertelement poison, half %b, i32 0
 %vb = shufflevector %elt.head, poison, zeroinitializer
 %v = call @llvm.vp.fadd.nxv32f16( %va, %vb, splat (i1 true), i32 %evl)