
Conversation

4vtomat (Member) commented Nov 13, 2025

No description provided.

llvmbot (Member) commented Nov 13, 2025

@llvm/pr-subscribers-backend-risc-v

Author: Brandon Wu (4vtomat)

Changes

Patch is 30.08 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/167920.diff

5 Files Affected:

  • (modified) llvm/lib/Target/RISCV/RISCVISelLowering.cpp (+7-11)
  • (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat-bf16.ll (+46)
  • (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splat.ll (+250-70)
  • (modified) llvm/test/CodeGen/RISCV/rvv/vp-splat.ll (+80)
  • (modified) llvm/test/CodeGen/RISCV/rvv/vsplats-bf16.ll (+23)
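
For orientation before the diff: based on the test updates included below, this change lets bf16 splats (both plain SPLAT_VECTOR and the experimental vp.splat intrinsic) lower through the Zvfbfa e16alt form, selecting vfmv.v.f directly from the FP register instead of moving the scalar through a GPR for vmv.v.x. A minimal IR sketch of the pattern the updated tests exercise (the function name here is illustrative, not taken from the patch):

define <8 x bfloat> @splat_example(bfloat %y) {
  ; With +v,+experimental-zvfbfa this now selects:
  ;   vsetvli a0, zero, e16alt, m1, ta, ma
  ;   vfmv.v.f v8, fa0
  ; whereas the zvfbfmin-only path moves %y through a GPR and uses vmv.v.x.
  %a = insertelement <8 x bfloat> poison, bfloat %y, i32 0
  %b = shufflevector <8 x bfloat> %a, <8 x bfloat> poison, <8 x i32> zeroinitializer
  ret <8 x bfloat> %b
}

The masked form, llvm.experimental.vp.splat.v8bf16, follows the same e16alt lowering, as the vp-splat test diffs below show.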
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 38cce26e44af4..009d278b9bf8b 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -88,9 +88,10 @@ static cl::opt<bool>
                       cl::init(true));
 
 // TODO: Support more ops
-static const unsigned ZvfbfaVPOps[] = {ISD::VP_FNEG, ISD::VP_FABS,
-                                       ISD::VP_FCOPYSIGN};
-static const unsigned ZvfbfaOps[] = {ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN};
+static const unsigned ZvfbfaVPOps[] = {
+    ISD::VP_FNEG, ISD::VP_FABS, ISD::VP_FCOPYSIGN, ISD::EXPERIMENTAL_VP_SPLAT};
+static const unsigned ZvfbfaOps[] = {ISD::FNEG, ISD::FABS, ISD::FCOPYSIGN,
+                                     ISD::SPLAT_VECTOR};
 
 RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                          const RISCVSubtarget &STI)
@@ -1272,17 +1273,12 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                          VT, Custom);
       setOperationAction(ISD::EXPERIMENTAL_VP_SPLICE, VT, Custom);
       setOperationAction(ISD::EXPERIMENTAL_VP_REVERSE, VT, Custom);
+      setOperationAction(ISD::EXPERIMENTAL_VP_SPLAT, VT, Custom);
 
       setOperationAction(ISD::FCOPYSIGN, VT, Legal);
+      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
       setOperationAction(ZvfbfaVPOps, VT, Custom);
 
-      MVT EltVT = VT.getVectorElementType();
-      if (isTypeLegal(EltVT))
-        setOperationAction({ISD::SPLAT_VECTOR, ISD::EXPERIMENTAL_VP_SPLAT}, VT,
-                           Custom);
-      else
-        setOperationAction({ISD::SPLAT_VECTOR, ISD::EXPERIMENTAL_VP_SPLAT},
-                           EltVT, Custom);
       setOperationAction({ISD::LOAD, ISD::STORE, ISD::MLOAD, ISD::MSTORE,
                           ISD::MGATHER, ISD::MSCATTER, ISD::VP_LOAD,
                           ISD::VP_STORE, ISD::EXPERIMENTAL_VP_STRIDED_LOAD,
@@ -4870,7 +4866,7 @@ static SDValue lowerScalarSplat(SDValue Passthru, SDValue Scalar, SDValue VL,
 
   if (VT.isFloatingPoint()) {
     if ((EltVT == MVT::f16 && !Subtarget.hasStdExtZvfh()) ||
-        EltVT == MVT::bf16) {
+        (EltVT == MVT::bf16 && !Subtarget.hasVInstructionsBF16())) {
       if ((EltVT == MVT::bf16 && Subtarget.hasStdExtZfbfmin()) ||
           (EltVT == MVT::f16 && Subtarget.hasStdExtZfhmin()))
         Scalar = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Scalar);
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat-bf16.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat-bf16.ll
index c94cdadc8ca59..82e199b4969db 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat-bf16.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat-bf16.ll
@@ -3,6 +3,8 @@
 ; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFBFMIN
 ; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfbfmin,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZFBFMIN-ZVFBFMIN
 ; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFBFMIN
+; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+experimental-zvfbfa -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFBFA
+; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+experimental-zvfbfa -verify-machineinstrs < %s | FileCheck %s --check-prefixes=ZVFBFA
 
 define <8 x bfloat> @splat_v8bf16(ptr %x, bfloat %y) {
 ; ZFBFMIN-ZVFBFMIN-LABEL: splat_v8bf16:
@@ -18,6 +20,12 @@ define <8 x bfloat> @splat_v8bf16(ptr %x, bfloat %y) {
 ; ZVFBFMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFBFMIN-NEXT:    vmv.v.x v8, a0
 ; ZVFBFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: splat_v8bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, m1, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
   %a = insertelement <8 x bfloat> poison, bfloat %y, i32 0
   %b = shufflevector <8 x bfloat> %a, <8 x bfloat> poison, <8 x i32> zeroinitializer
   ret <8 x bfloat> %b
@@ -37,6 +45,12 @@ define <16 x bfloat> @splat_16bf16(ptr %x, bfloat %y) {
 ; ZVFBFMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFBFMIN-NEXT:    vmv.v.x v8, a0
 ; ZVFBFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: splat_16bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, m2, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
   %a = insertelement <16 x bfloat> poison, bfloat %y, i32 0
   %b = shufflevector <16 x bfloat> %a, <16 x bfloat> poison, <16 x i32> zeroinitializer
   ret <16 x bfloat> %b
@@ -58,6 +72,12 @@ define <64 x bfloat> @splat_64bf16(ptr %x, bfloat %y) {
 ; ZVFBFMIN-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
 ; ZVFBFMIN-NEXT:    vmv.v.x v8, a0
 ; ZVFBFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: splat_64bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16alt, m8, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
   %a = insertelement <64 x bfloat> poison, bfloat %y, i32 0
   %b = shufflevector <64 x bfloat> %a, <64 x bfloat> poison, <64 x i32> zeroinitializer
   ret <64 x bfloat> %b
@@ -75,6 +95,12 @@ define <8 x bfloat> @splat_zero_v8bf16(ptr %x) {
 ; ZVFBFMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFBFMIN-NEXT:    vmv.v.i v8, 0
 ; ZVFBFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: splat_zero_v8bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT:    vmv.v.i v8, 0
+; ZVFBFA-NEXT:    ret
   ret <8 x bfloat> splat (bfloat 0.0)
 }
 
@@ -90,6 +116,12 @@ define <16 x bfloat> @splat_zero_16bf16(ptr %x) {
 ; ZVFBFMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFBFMIN-NEXT:    vmv.v.i v8, 0
 ; ZVFBFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: splat_zero_16bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT:    vmv.v.i v8, 0
+; ZVFBFA-NEXT:    ret
   ret <16 x bfloat> splat (bfloat 0.0)
 }
 
@@ -107,6 +139,13 @@ define <8 x bfloat> @splat_negzero_v8bf16(ptr %x) {
 ; ZVFBFMIN-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; ZVFBFMIN-NEXT:    vmv.v.x v8, a0
 ; ZVFBFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: splat_negzero_v8bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    lui a0, 1048568
+; ZVFBFA-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
+; ZVFBFA-NEXT:    vmv.v.x v8, a0
+; ZVFBFA-NEXT:    ret
   ret <8 x bfloat> splat (bfloat -0.0)
 }
 
@@ -124,5 +163,12 @@ define <16 x bfloat> @splat_negzero_16bf16(ptr %x) {
 ; ZVFBFMIN-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; ZVFBFMIN-NEXT:    vmv.v.x v8, a0
 ; ZVFBFMIN-NEXT:    ret
+;
+; ZVFBFA-LABEL: splat_negzero_16bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    lui a0, 1048568
+; ZVFBFA-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; ZVFBFA-NEXT:    vmv.v.x v8, a0
+; ZVFBFA-NEXT:    ret
   ret <16 x bfloat> splat (bfloat -0.0)
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splat.ll
index 40e337c811e8b..7901f8c290543 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-splat.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
-; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
-; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,ZVFH_RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zvfh,+zvfbfmin -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH,ZVFH_RV64
+; RUN: llc -mtriple=riscv32 -mattr=+v,+d,+zvfh,+experimental-zvfbfa -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFBFA,ZVFBFA_RV32
+; RUN: llc -mtriple=riscv64 -mattr=+v,+d,+zvfh,+experimental-zvfbfa -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFBFA,ZVFBFA_RV64
 
 define <1 x i8> @vp_splat_v1i8(i8 %val, <1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp_splat_v1i8:
@@ -183,97 +185,275 @@ define <16 x i32> @vp_splat_v16i32(i32 %val, <16 x i1> %m, i32 zeroext %evl) {
 }
 
 define <1 x i64> @vp_splat_v1i64(i64 %val, <1 x i1> %m, i32 zeroext %evl) {
-; RV32-LABEL: vp_splat_v1i64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    sw a1, 12(sp)
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT:    vlse64.v v8, (a0), zero
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    .cfi_def_cfa_offset 0
-; RV32-NEXT:    ret
+; ZVFH_RV32-LABEL: vp_splat_v1i64:
+; ZVFH_RV32:       # %bb.0:
+; ZVFH_RV32-NEXT:    addi sp, sp, -16
+; ZVFH_RV32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFH_RV32-NEXT:    sw a0, 8(sp)
+; ZVFH_RV32-NEXT:    sw a1, 12(sp)
+; ZVFH_RV32-NEXT:    addi a0, sp, 8
+; ZVFH_RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; ZVFH_RV32-NEXT:    vlse64.v v8, (a0), zero
+; ZVFH_RV32-NEXT:    addi sp, sp, 16
+; ZVFH_RV32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFH_RV32-NEXT:    ret
 ;
-; RV64-LABEL: vp_splat_v1i64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; RV64-NEXT:    vmv.v.x v8, a0
-; RV64-NEXT:    ret
+; ZVFH_RV64-LABEL: vp_splat_v1i64:
+; ZVFH_RV64:       # %bb.0:
+; ZVFH_RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; ZVFH_RV64-NEXT:    vmv.v.x v8, a0
+; ZVFH_RV64-NEXT:    ret
+;
+; ZVFBFA_RV32-LABEL: vp_splat_v1i64:
+; ZVFBFA_RV32:       # %bb.0:
+; ZVFBFA_RV32-NEXT:    addi sp, sp, -16
+; ZVFBFA_RV32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFBFA_RV32-NEXT:    sw a0, 8(sp)
+; ZVFBFA_RV32-NEXT:    sw a1, 12(sp)
+; ZVFBFA_RV32-NEXT:    addi a0, sp, 8
+; ZVFBFA_RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; ZVFBFA_RV32-NEXT:    vlse64.v v8, (a0), zero
+; ZVFBFA_RV32-NEXT:    addi sp, sp, 16
+; ZVFBFA_RV32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFBFA_RV32-NEXT:    ret
+;
+; ZVFBFA_RV64-LABEL: vp_splat_v1i64:
+; ZVFBFA_RV64:       # %bb.0:
+; ZVFBFA_RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; ZVFBFA_RV64-NEXT:    vmv.v.x v8, a0
+; ZVFBFA_RV64-NEXT:    ret
   %splat = call <1 x i64> @llvm.experimental.vp.splat.v1i64(i64 %val, <1 x i1> %m, i32 %evl)
   ret <1 x i64> %splat
 }
 
 define <2 x i64> @vp_splat_v2i64(i64 %val, <2 x i1> %m, i32 zeroext %evl) {
-; RV32-LABEL: vp_splat_v2i64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    sw a1, 12(sp)
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT:    vlse64.v v8, (a0), zero
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    .cfi_def_cfa_offset 0
-; RV32-NEXT:    ret
+; ZVFH_RV32-LABEL: vp_splat_v2i64:
+; ZVFH_RV32:       # %bb.0:
+; ZVFH_RV32-NEXT:    addi sp, sp, -16
+; ZVFH_RV32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFH_RV32-NEXT:    sw a0, 8(sp)
+; ZVFH_RV32-NEXT:    sw a1, 12(sp)
+; ZVFH_RV32-NEXT:    addi a0, sp, 8
+; ZVFH_RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVFH_RV32-NEXT:    vlse64.v v8, (a0), zero
+; ZVFH_RV32-NEXT:    addi sp, sp, 16
+; ZVFH_RV32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFH_RV32-NEXT:    ret
+;
+; ZVFH_RV64-LABEL: vp_splat_v2i64:
+; ZVFH_RV64:       # %bb.0:
+; ZVFH_RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; ZVFH_RV64-NEXT:    vmv.v.x v8, a0
+; ZVFH_RV64-NEXT:    ret
+;
+; ZVFBFA_RV32-LABEL: vp_splat_v2i64:
+; ZVFBFA_RV32:       # %bb.0:
+; ZVFBFA_RV32-NEXT:    addi sp, sp, -16
+; ZVFBFA_RV32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFBFA_RV32-NEXT:    sw a0, 8(sp)
+; ZVFBFA_RV32-NEXT:    sw a1, 12(sp)
+; ZVFBFA_RV32-NEXT:    addi a0, sp, 8
+; ZVFBFA_RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZVFBFA_RV32-NEXT:    vlse64.v v8, (a0), zero
+; ZVFBFA_RV32-NEXT:    addi sp, sp, 16
+; ZVFBFA_RV32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFBFA_RV32-NEXT:    ret
 ;
-; RV64-LABEL: vp_splat_v2i64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; RV64-NEXT:    vmv.v.x v8, a0
-; RV64-NEXT:    ret
+; ZVFBFA_RV64-LABEL: vp_splat_v2i64:
+; ZVFBFA_RV64:       # %bb.0:
+; ZVFBFA_RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; ZVFBFA_RV64-NEXT:    vmv.v.x v8, a0
+; ZVFBFA_RV64-NEXT:    ret
   %splat = call <2 x i64> @llvm.experimental.vp.splat.v2i64(i64 %val, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %splat
 }
 
 define <4 x i64> @vp_splat_v4i64(i64 %val, <4 x i1> %m, i32 zeroext %evl) {
-; RV32-LABEL: vp_splat_v4i64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    sw a1, 12(sp)
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT:    vlse64.v v8, (a0), zero
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    .cfi_def_cfa_offset 0
-; RV32-NEXT:    ret
+; ZVFH_RV32-LABEL: vp_splat_v4i64:
+; ZVFH_RV32:       # %bb.0:
+; ZVFH_RV32-NEXT:    addi sp, sp, -16
+; ZVFH_RV32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFH_RV32-NEXT:    sw a0, 8(sp)
+; ZVFH_RV32-NEXT:    sw a1, 12(sp)
+; ZVFH_RV32-NEXT:    addi a0, sp, 8
+; ZVFH_RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; ZVFH_RV32-NEXT:    vlse64.v v8, (a0), zero
+; ZVFH_RV32-NEXT:    addi sp, sp, 16
+; ZVFH_RV32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFH_RV32-NEXT:    ret
 ;
-; RV64-LABEL: vp_splat_v4i64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
-; RV64-NEXT:    vmv.v.x v8, a0
-; RV64-NEXT:    ret
+; ZVFH_RV64-LABEL: vp_splat_v4i64:
+; ZVFH_RV64:       # %bb.0:
+; ZVFH_RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; ZVFH_RV64-NEXT:    vmv.v.x v8, a0
+; ZVFH_RV64-NEXT:    ret
+;
+; ZVFBFA_RV32-LABEL: vp_splat_v4i64:
+; ZVFBFA_RV32:       # %bb.0:
+; ZVFBFA_RV32-NEXT:    addi sp, sp, -16
+; ZVFBFA_RV32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFBFA_RV32-NEXT:    sw a0, 8(sp)
+; ZVFBFA_RV32-NEXT:    sw a1, 12(sp)
+; ZVFBFA_RV32-NEXT:    addi a0, sp, 8
+; ZVFBFA_RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; ZVFBFA_RV32-NEXT:    vlse64.v v8, (a0), zero
+; ZVFBFA_RV32-NEXT:    addi sp, sp, 16
+; ZVFBFA_RV32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFBFA_RV32-NEXT:    ret
+;
+; ZVFBFA_RV64-LABEL: vp_splat_v4i64:
+; ZVFBFA_RV64:       # %bb.0:
+; ZVFBFA_RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; ZVFBFA_RV64-NEXT:    vmv.v.x v8, a0
+; ZVFBFA_RV64-NEXT:    ret
   %splat = call <4 x i64> @llvm.experimental.vp.splat.v4i64(i64 %val, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %splat
 }
 
 define <8 x i64> @vp_splat_v8i64(i64 %val, <8 x i1> %m, i32 zeroext %evl) {
-; RV32-LABEL: vp_splat_v8i64:
-; RV32:       # %bb.0:
-; RV32-NEXT:    addi sp, sp, -16
-; RV32-NEXT:    .cfi_def_cfa_offset 16
-; RV32-NEXT:    sw a0, 8(sp)
-; RV32-NEXT:    sw a1, 12(sp)
-; RV32-NEXT:    addi a0, sp, 8
-; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT:    vlse64.v v8, (a0), zero
-; RV32-NEXT:    addi sp, sp, 16
-; RV32-NEXT:    .cfi_def_cfa_offset 0
-; RV32-NEXT:    ret
+; ZVFH_RV32-LABEL: vp_splat_v8i64:
+; ZVFH_RV32:       # %bb.0:
+; ZVFH_RV32-NEXT:    addi sp, sp, -16
+; ZVFH_RV32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFH_RV32-NEXT:    sw a0, 8(sp)
+; ZVFH_RV32-NEXT:    sw a1, 12(sp)
+; ZVFH_RV32-NEXT:    addi a0, sp, 8
+; ZVFH_RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; ZVFH_RV32-NEXT:    vlse64.v v8, (a0), zero
+; ZVFH_RV32-NEXT:    addi sp, sp, 16
+; ZVFH_RV32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFH_RV32-NEXT:    ret
+;
+; ZVFH_RV64-LABEL: vp_splat_v8i64:
+; ZVFH_RV64:       # %bb.0:
+; ZVFH_RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; ZVFH_RV64-NEXT:    vmv.v.x v8, a0
+; ZVFH_RV64-NEXT:    ret
+;
+; ZVFBFA_RV32-LABEL: vp_splat_v8i64:
+; ZVFBFA_RV32:       # %bb.0:
+; ZVFBFA_RV32-NEXT:    addi sp, sp, -16
+; ZVFBFA_RV32-NEXT:    .cfi_def_cfa_offset 16
+; ZVFBFA_RV32-NEXT:    sw a0, 8(sp)
+; ZVFBFA_RV32-NEXT:    sw a1, 12(sp)
+; ZVFBFA_RV32-NEXT:    addi a0, sp, 8
+; ZVFBFA_RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; ZVFBFA_RV32-NEXT:    vlse64.v v8, (a0), zero
+; ZVFBFA_RV32-NEXT:    addi sp, sp, 16
+; ZVFBFA_RV32-NEXT:    .cfi_def_cfa_offset 0
+; ZVFBFA_RV32-NEXT:    ret
 ;
-; RV64-LABEL: vp_splat_v8i64:
-; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
-; RV64-NEXT:    vmv.v.x v8, a0
-; RV64-NEXT:    ret
+; ZVFBFA_RV64-LABEL: vp_splat_v8i64:
+; ZVFBFA_RV64:       # %bb.0:
+; ZVFBFA_RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
+; ZVFBFA_RV64-NEXT:    vmv.v.x v8, a0
+; ZVFBFA_RV64-NEXT:    ret
   %splat = call <8 x i64> @llvm.experimental.vp.splat.v8i64(i64 %val, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %splat
 }
 
+define <1 x bfloat> @vp_splat_v1bf16(bfloat %val, <1 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vp_splat_v1bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fmv.x.w a1, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vmv.v.x v8, a1
+; ZVFH-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_v1bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
+  %splat = call <1 x bfloat> @llvm.experimental.vp.splat.v1bf16(bfloat %val, <1 x i1> %m, i32 %evl)
+  ret <1 x bfloat> %splat
+}
+
+define <2 x bfloat> @vp_splat_v2bf16(bfloat %val, <2 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vp_splat_v2bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fmv.x.w a1, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; ZVFH-NEXT:    vmv.v.x v8, a1
+; ZVFH-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_v2bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
+  %splat = call <2 x bfloat> @llvm.experimental.vp.splat.v2bf16(bfloat %val, <2 x i1> %m, i32 %evl)
+  ret <2 x bfloat> %splat
+}
+
+define <4 x bfloat> @vp_splat_v4bf16(bfloat %val, <4 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vp_splat_v4bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fmv.x.w a1, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; ZVFH-NEXT:    vmv.v.x v8, a1
+; ZVFH-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_v4bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
+  %splat = call <4 x bfloat> @llvm.experimental.vp.splat.v4bf16(bfloat %val, <4 x i1> %m, i32 %evl)
+  ret <4 x bfloat> %splat
+}
+
+define <8 x bfloat> @vp_splat_v8bf16(bfloat %val, <8 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vp_splat_v8bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fmv.x.w a1, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; ZVFH-NEXT:    vmv.v.x v8, a1
+; ZVFH-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_v8bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, m1, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
+  %splat = call <8 x bfloat> @llvm.experimental.vp.splat.v8bf16(bfloat %val, <8 x i1> %m, i32 %evl)
+  ret <8 x bfloat> %splat
+}
+
+define <16 x bfloat> @vp_splat_v16bf16(bfloat %val, <16 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vp_splat_v16bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fmv.x.w a1, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; ZVFH-NEXT:    vmv.v.x v8, a1
+; ZVFH-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_v16bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, m2, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
+  %splat = call <16 x bfloat> @llvm.experimental.vp.splat.v16bf16(bfloat %val, <16 x i1> %m, i32 %evl)
+  ret <16 x bfloat> %splat
+}
+
+define <32 x bfloat> @vp_splat_v32bf16(bfloat %val, <32 x i1> %m, i32 zeroext %evl) {
+; ZVFH-LABEL: vp_splat_v32bf16:
+; ZVFH:       # %bb.0:
+; ZVFH-NEXT:    fmv.x.w a1, fa0
+; ZVFH-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; ZVFH-NEXT:    vmv.v.x v8, a1
+; ZVFH-NEXT:    ret
+;
+; ZVFBFA-LABEL: vp_splat_v32bf16:
+; ZVFBFA:       # %bb.0:
+; ZVFBFA-NEXT:    vsetvli zero, a0, e16alt, m4, ta, ma
+; ZVFBFA-NEXT:    vfmv.v.f v8, fa0
+; ZVFBFA-NEXT:    ret
+  %splat = call <32 x bfloat> @llvm.experimental.vp.splat.v32bf16(bfloat %val, <32 x i1> %m, i32 %evl)
+  ret <32 x bfloat> %splat
+}
+
 define <1 x half> @vp_splat_v1f16(half %val, <1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vp...
[truncated]

topperc (Collaborator) left a comment

LGTM

4vtomat enabled auto-merge (squash) November 14, 2025 07:25
4vtomat merged commit 9fe0a70 into llvm:main Nov 17, 2025
7 of 9 checks passed
4vtomat deleted the zvfbfa_splat_vector branch November 17, 2025 16:40