diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td index 35324a90b43342..015585780e58d0 100644 --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -388,6 +388,11 @@ let TargetPrefix = "riscv" in { defm vfsub : RISCVBinaryAAX; defm vfrsub : RISCVBinaryAAX; + defm vfwadd : RISCVBinaryABX; + defm vfwsub : RISCVBinaryABX; + defm vfwadd_w : RISCVBinaryAAX; + defm vfwsub_w : RISCVBinaryAAX; + defm vsaddu : RISCVSaturatingBinaryAAX; defm vsadd : RISCVSaturatingBinaryAAX; defm vssubu : RISCVSaturatingBinaryAAX; diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td index 9bbba13ea19776..150cf58b0339ac 100644 --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -201,6 +201,19 @@ defset list AllWidenableIntVectors = { def : VTypeInfoToWide; } +defset list AllWidenableFloatVectors = { + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; + + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; + def : VTypeInfoToWide; +} + // This class holds the record of the RISCVVPseudoTable below. // This represents the information we need in codegen for each pseudo. // The definition should be consistent with `struct PseudoInfo` in @@ -662,9 +675,10 @@ multiclass VPseudoBinaryW_VV { "@earlyclobber $rd">; } -multiclass VPseudoBinaryW_VX { +multiclass VPseudoBinaryW_VX { foreach m = MxList.m[0-5] in - defm _VX : VPseudoBinary; } @@ -674,9 +688,10 @@ multiclass VPseudoBinaryW_WV { "@earlyclobber $rd">; } -multiclass VPseudoBinaryW_WX { +multiclass VPseudoBinaryW_WX { foreach m = MxList.m[0-5] in - defm _WX : VPseudoBinary; } @@ -757,14 +772,14 @@ multiclass VPseudoBinaryV_VX_VI { defm "" : VPseudoBinaryV_VI; } -multiclass VPseudoBinaryW_VV_VX { +multiclass VPseudoBinaryW_VV_VX { defm "" : VPseudoBinaryW_VV; - defm "" : VPseudoBinaryW_VX; + defm "" : VPseudoBinaryW_VX; } -multiclass VPseudoBinaryW_WV_WX { +multiclass VPseudoBinaryW_WV_WX { defm "" : VPseudoBinaryW_WV; - defm "" : VPseudoBinaryW_WX; + defm "" : VPseudoBinaryW_WX; } multiclass VPseudoBinaryV_VM_XM_IM { @@ -1120,8 +1135,9 @@ multiclass VPatBinaryV_VI; } -multiclass VPatBinaryW_VV { - foreach VtiToWti = AllWidenableIntVectors in { +multiclass VPatBinaryW_VV vtilist> { + foreach VtiToWti = vtilist in { defvar Vti = VtiToWti.Vti; defvar Wti = VtiToWti.Wti; defm : VPatBinary { } } -multiclass VPatBinaryW_VX { - foreach VtiToWti = AllWidenableIntVectors in { +multiclass VPatBinaryW_VX vtilist> { + foreach VtiToWti = vtilist in { defvar Vti = VtiToWti.Vti; defvar Wti = VtiToWti.Wti; - defm : VPatBinary; + Vti.RegClass, Vti.ScalarRegClass>; } } -multiclass VPatBinaryW_WV { - foreach VtiToWti = AllWidenableIntVectors in { +multiclass VPatBinaryW_WV vtilist> { + foreach VtiToWti = vtilist in { defvar Vti = VtiToWti.Vti; defvar Wti = VtiToWti.Wti; defm : VPatBinary { } } -multiclass VPatBinaryW_WX { - foreach VtiToWti = AllWidenableIntVectors in { +multiclass VPatBinaryW_WX vtilist> { + foreach VtiToWti = vtilist in { defvar Vti = VtiToWti.Vti; defvar Wti = VtiToWti.Wti; - defm : VPatBinary; + Wti.RegClass, Vti.ScalarRegClass>; } } -multiclass VPatBinaryV_WV { - foreach VtiToWti = AllWidenableIntVectors in { +multiclass VPatBinaryV_WV vtilist> { + foreach VtiToWti = vtilist in { defvar Vti = VtiToWti.Vti; defvar Wti = VtiToWti.Wti; defm : VPatBinary { } } -multiclass 
VPatBinaryV_WX { - foreach VtiToWti = AllWidenableIntVectors in { +multiclass VPatBinaryV_WX vtilist> { + foreach VtiToWti = vtilist in { defvar Vti = VtiToWti.Vti; defvar Wti = VtiToWti.Wti; - defm : VPatBinary; + Wti.RegClass, Vti.ScalarRegClass>; } } -multiclass VPatBinaryV_WI { - foreach VtiToWti = AllWidenableIntVectors in { +multiclass VPatBinaryV_WI vtilist> { + foreach VtiToWti = vtilist in { defvar Vti = VtiToWti.Vti; defvar Wti = VtiToWti.Wti; defm : VPatBinary; } -multiclass VPatBinaryW_VV_VX +multiclass VPatBinaryW_VV_VX vtilist> { - defm "" : VPatBinaryW_VV; - defm "" : VPatBinaryW_VX; + defm "" : VPatBinaryW_VV; + defm "" : VPatBinaryW_VX; } -multiclass VPatBinaryW_WV_WX +multiclass VPatBinaryW_WV_WX vtilist> { - defm "" : VPatBinaryW_WV; - defm "" : VPatBinaryW_WX; + defm "" : VPatBinaryW_WV; + defm "" : VPatBinaryW_WX; } -multiclass VPatBinaryV_WV_WX_WI +multiclass VPatBinaryV_WV_WX_WI vtilist> { - defm "" : VPatBinaryV_WV; - defm "" : VPatBinaryV_WX; - defm "" : VPatBinaryV_WI; + defm "" : VPatBinaryV_WV; + defm "" : VPatBinaryV_WX; + defm "" : VPatBinaryV_WI; } multiclass VPatBinaryV_VM_XM_IM @@ -1500,6 +1528,14 @@ defm PseudoVFADD : VPseudoBinaryV_VV_VX; defm PseudoVFSUB : VPseudoBinaryV_VV_VX; defm PseudoVFRSUB : VPseudoBinaryV_VX; +//===----------------------------------------------------------------------===// +// 14.3. Vector Widening Floating-Point Add/Subtract Instructions +//===----------------------------------------------------------------------===// +defm PseudoVFWADD : VPseudoBinaryW_VV_VX; +defm PseudoVFWSUB : VPseudoBinaryW_VV_VX; +defm PseudoVFWADD : VPseudoBinaryW_WV_WX; +defm PseudoVFWSUB : VPseudoBinaryW_WV_WX; + //===----------------------------------------------------------------------===// // 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions //===----------------------------------------------------------------------===// @@ -1674,14 +1710,14 @@ defm "" : VPatBinaryV_VX_VI<"int_riscv_vrsub", "PseudoVRSUB", AllIntegerVectors> //===----------------------------------------------------------------------===// // 12.2. 
Vector Widening Integer Add/Subtract //===----------------------------------------------------------------------===// -defm "" : VPatBinaryW_VV_VX<"int_riscv_vwaddu", "PseudoVWADDU">; -defm "" : VPatBinaryW_VV_VX<"int_riscv_vwsubu", "PseudoVWSUBU">; -defm "" : VPatBinaryW_VV_VX<"int_riscv_vwadd", "PseudoVWADD">; -defm "" : VPatBinaryW_VV_VX<"int_riscv_vwsub", "PseudoVWSUB">; -defm "" : VPatBinaryW_WV_WX<"int_riscv_vwaddu_w", "PseudoVWADDU">; -defm "" : VPatBinaryW_WV_WX<"int_riscv_vwsubu_w", "PseudoVWSUBU">; -defm "" : VPatBinaryW_WV_WX<"int_riscv_vwadd_w", "PseudoVWADD">; -defm "" : VPatBinaryW_WV_WX<"int_riscv_vwsub_w", "PseudoVWSUB">; +defm "" : VPatBinaryW_VV_VX<"int_riscv_vwaddu", "PseudoVWADDU", AllWidenableIntVectors>; +defm "" : VPatBinaryW_VV_VX<"int_riscv_vwsubu", "PseudoVWSUBU", AllWidenableIntVectors>; +defm "" : VPatBinaryW_VV_VX<"int_riscv_vwadd", "PseudoVWADD", AllWidenableIntVectors>; +defm "" : VPatBinaryW_VV_VX<"int_riscv_vwsub", "PseudoVWSUB", AllWidenableIntVectors>; +defm "" : VPatBinaryW_WV_WX<"int_riscv_vwaddu_w", "PseudoVWADDU", AllWidenableIntVectors>; +defm "" : VPatBinaryW_WV_WX<"int_riscv_vwsubu_w", "PseudoVWSUBU", AllWidenableIntVectors>; +defm "" : VPatBinaryW_WV_WX<"int_riscv_vwadd_w", "PseudoVWADD", AllWidenableIntVectors>; +defm "" : VPatBinaryW_WV_WX<"int_riscv_vwsub_w", "PseudoVWSUB", AllWidenableIntVectors>; //===----------------------------------------------------------------------===// // 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions @@ -1707,8 +1743,8 @@ defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors //===----------------------------------------------------------------------===// // 12.7. Vector Narrowing Integer Right Shift Instructions //===----------------------------------------------------------------------===// -defm "" : VPatBinaryV_WV_WX_WI<"int_riscv_vnsrl", "PseudoVNSRL">; -defm "" : VPatBinaryV_WV_WX_WI<"int_riscv_vnsra", "PseudoVNSRA">; +defm "" : VPatBinaryV_WV_WX_WI<"int_riscv_vnsrl", "PseudoVNSRL", AllWidenableIntVectors>; +defm "" : VPatBinaryV_WV_WX_WI<"int_riscv_vnsra", "PseudoVNSRA", AllWidenableIntVectors>; //===----------------------------------------------------------------------===// // 12.9. Vector Integer Min/Max Instructions @@ -1737,9 +1773,9 @@ defm "" : VPatBinaryV_VV_VX<"int_riscv_vrem", "PseudoVREM", AllIntegerVectors>; //===----------------------------------------------------------------------===// // 12.12. Vector Widening Integer Multiply Instructions //===----------------------------------------------------------------------===// -defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmul", "PseudoVWMUL">; -defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmulu", "PseudoVWMULU">; -defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmulsu", "PseudoVWMULSU">; +defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmul", "PseudoVWMUL", AllWidenableIntVectors>; +defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmulu", "PseudoVWMULU", AllWidenableIntVectors>; +defm "" : VPatBinaryW_VV_VX<"int_riscv_vwmulsu", "PseudoVWMULSU", AllWidenableIntVectors>; //===----------------------------------------------------------------------===// // 12.17. Vector Integer Move Instructions @@ -1778,6 +1814,14 @@ defm "" : VPatBinaryV_VV_VX<"int_riscv_vfadd", "PseudoVFADD", AllFloatVectors>; defm "" : VPatBinaryV_VV_VX<"int_riscv_vfsub", "PseudoVFSUB", AllFloatVectors>; defm "" : VPatBinaryV_VX<"int_riscv_vfrsub", "PseudoVFRSUB", AllFloatVectors>; +//===----------------------------------------------------------------------===// +// 14.3. 
Vector Widening Floating-Point Add/Subtract Instructions +//===----------------------------------------------------------------------===// +defm "" : VPatBinaryW_VV_VX<"int_riscv_vfwadd", "PseudoVFWADD", AllWidenableFloatVectors>; +defm "" : VPatBinaryW_VV_VX<"int_riscv_vfwsub", "PseudoVFWSUB", AllWidenableFloatVectors>; +defm "" : VPatBinaryW_WV_WX<"int_riscv_vfwadd_w", "PseudoVFWADD", AllWidenableFloatVectors>; +defm "" : VPatBinaryW_WV_WX<"int_riscv_vfwsub_w", "PseudoVFWSUB", AllWidenableFloatVectors>; + //===----------------------------------------------------------------------===// // 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions //===----------------------------------------------------------------------===// diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll new file mode 100644 index 00000000000000..265d33ebb52657 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll @@ -0,0 +1,401 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwadd.nxv1f16( + , + , + i32); + +define @intrinsic_vfwadd_vv_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv1f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv1f16( + , + , + , + , + i32); + +define @intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv1f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv2f16( + , + , + i32); + +define @intrinsic_vfwadd_vv_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv2f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv2f16( + , + , + , + , + i32); + +define @intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv2f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv4f16( + , + , + i32); + +define @intrinsic_vfwadd_vv_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv4f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv4f16( + , + , + , + , + i32); + +define @intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv4f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + 
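; Illustrative sketch (not part of the patch): the scalable-vector types in
; this extracted diff were lost. Assuming the usual RVV intrinsic naming
; convention, the vfwadd.nxv4f16 intrinsic exercised by the tests above takes
; two <vscale x 4 x half> operands plus a VL operand and produces a widened
; <vscale x 4 x float> result. The caller name below is hypothetical and is
; shown only to illustrate how the intrinsic is assumed to be used.
declare <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  i32);

define <vscale x 4 x float> @example_vfwadd_vv(<vscale x 4 x half> %x, <vscale x 4 x half> %y, i32 %vl) nounwind {
entry:
  ; Widening add: f16 + f16 elementwise, producing f32 elements.
  %r = call <vscale x 4 x float> @llvm.riscv.vfwadd.nxv4f16(
    <vscale x 4 x half> %x,
    <vscale x 4 x half> %y,
    i32 %vl)
  ret <vscale x 4 x float> %r
}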
+declare @llvm.riscv.vfwadd.nxv8f16( + , + , + i32); + +define @intrinsic_vfwadd_vv_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv8f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv8f16( + , + , + , + , + i32); + +define @intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv8f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv16f16( + , + , + i32); + +define @intrinsic_vfwadd_vv_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv16f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv16f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv1f16.f16( + , + half, + i32); + +define @intrinsic_vfwadd_vf_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv1f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv1f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfwadd_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv2f16.f16( + , + half, + i32); + +define @intrinsic_vfwadd_vf_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv2f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv2f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfwadd_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv4f16.f16( + , + half, + i32); + +define @intrinsic_vfwadd_vf_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: 
vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv4f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv4f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfwadd_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv8f16.f16( + , + half, + i32); + +define @intrinsic_vfwadd_vf_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv8f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv8f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfwadd_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv16f16.f16( + , + half, + i32); + +define @intrinsic_vfwadd_vf_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv16f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv16f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfwadd_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll new file mode 100644 index 00000000000000..0a7b41f64cf32c --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll @@ -0,0 +1,721 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwadd.nxv1f16( + , + , + i64); + +define @intrinsic_vfwadd_vv_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv1f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv1f16( + , + , + , + , + i64); + +define @intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv1f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv2f16( + , + , + i64); + +define @intrinsic_vfwadd_vv_nxv2f16_nxv2f16( 
%0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv2f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv2f16( + , + , + , + , + i64); + +define @intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv2f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv4f16( + , + , + i64); + +define @intrinsic_vfwadd_vv_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv4f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv4f16( + , + , + , + , + i64); + +define @intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv4f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv8f16( + , + , + i64); + +define @intrinsic_vfwadd_vv_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv8f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv8f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv16f16( + , + , + i64); + +define @intrinsic_vfwadd_vv_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv16f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv16f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv1f32( + , + , + i64); + +define @intrinsic_vfwadd_vv_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv1f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare 
@llvm.riscv.vfwadd.mask.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv1f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv2f32( + , + , + i64); + +define @intrinsic_vfwadd_vv_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv2f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv2f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv4f32( + , + , + i64); + +define @intrinsic_vfwadd_vv_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv4f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv4f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv8f32( + , + , + i64); + +define @intrinsic_vfwadd_vv_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vv_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv8f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv8f32( + , + , + , + , + i64); + +define @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv8f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv1f16.f16( + , + half, + i64); + +define @intrinsic_vfwadd_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv1f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv1f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfwadd_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, 
{{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv2f16.f16( + , + half, + i64); + +define @intrinsic_vfwadd_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv2f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv2f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfwadd_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv4f16.f16( + , + half, + i64); + +define @intrinsic_vfwadd_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv4f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv4f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfwadd_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv8f16.f16( + , + half, + i64); + +define @intrinsic_vfwadd_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv8f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv8f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfwadd_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv16f16.f16( + , + half, + i64); + +define @intrinsic_vfwadd_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv16f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv16f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfwadd_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv1f32.f32( + , + float, + i64); + +define 
@intrinsic_vfwadd_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv1f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv1f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfwadd_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv2f32.f32( + , + float, + i64); + +define @intrinsic_vfwadd_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv2f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv2f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfwadd_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv4f32.f32( + , + float, + i64); + +define @intrinsic_vfwadd_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv4f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv4f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfwadd_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.nxv8f32.f32( + , + float, + i64); + +define @intrinsic_vfwadd_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_vf_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.nxv8f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.mask.nxv8f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfwadd_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll new file mode 100644 index 00000000000000..6537f2cdf66f8d --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll @@ -0,0 +1,401 @@ +; RUN: llc 
-mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwadd.w.nxv1f16( + , + , + i32); + +define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv1f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv1f16( + , + , + , + , + i32); + +define @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv1f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv2f16( + , + , + i32); + +define @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv2f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv2f16( + , + , + , + , + i32); + +define @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv2f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv4f16( + , + , + i32); + +define @intrinsic_vfwadd.w_wv_nxv4f32_nxv4f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv4f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv4f16( + , + , + , + , + i32); + +define @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv4f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv8f16( + , + , + i32); + +define @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv8f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv8f16( + , + , + , + , + i32); + +define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv8f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv16f16( + , + , + i32); + +define @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f16( %0, %1, i32 %2) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv16f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv16f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv1f32.f16( + , + half, + i32); + +define @intrinsic_vfwadd.w_wf_nxv1f32_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv1f32.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( + , + , + half, + , + i32); + +define @intrinsic_vfwadd.w_mask_wf_nxv1f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv2f32.f16( + , + half, + i32); + +define @intrinsic_vfwadd.w_wf_nxv2f32_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv2f32.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( + , + , + half, + , + i32); + +define @intrinsic_vfwadd.w_mask_wf_nxv2f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv4f32.f16( + , + half, + i32); + +define @intrinsic_vfwadd.w_wf_nxv4f32_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv4f32.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( + , + , + half, + , + i32); + +define @intrinsic_vfwadd.w_mask_wf_nxv4f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv8f32.f16( + , + half, + i32); + +define @intrinsic_vfwadd.w_wf_nxv8f32_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, 
{{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv8f32.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( + , + , + half, + , + i32); + +define @intrinsic_vfwadd.w_mask_wf_nxv8f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv16f32.f16( + , + half, + i32); + +define @intrinsic_vfwadd.w_wf_nxv16f32_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv16f32.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( + , + , + half, + , + i32); + +define @intrinsic_vfwadd.w_mask_wf_nxv16f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll new file mode 100644 index 00000000000000..f77d6e4805bb77 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv64.ll @@ -0,0 +1,721 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwadd.w.nxv1f16( + , + , + i64); + +define @intrinsic_vfwadd.w_wv_nxv1f32_nxv1f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f32_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv1f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv1f16( + , + , + , + , + i64); + +define @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv1f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv2f16( + , + , + i64); + +define @intrinsic_vfwadd.w_wv_nxv2f32_nxv2f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f32_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv2f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv2f16( + , + , + , + , + i64); + +define @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv2f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv4f16( + , + , + i64); + +define 
@intrinsic_vfwadd.w_wv_nxv4f32_nxv4f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f32_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv4f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv4f16( + , + , + , + , + i64); + +define @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv4f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv8f16( + , + , + i64); + +define @intrinsic_vfwadd.w_wv_nxv8f32_nxv8f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f32_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv8f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv8f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv16f16( + , + , + i64); + +define @intrinsic_vfwadd.w_wv_nxv16f32_nxv16f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv16f32_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv16f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv16f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv1f32( + , + , + i64); + +define @intrinsic_vfwadd.w_wv_nxv1f64_nxv1f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv1f64_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv1f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f64_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv2f32( + , + , + i64); + +define @intrinsic_vfwadd.w_wv_nxv2f64_nxv2f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv2f64_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call 
@llvm.riscv.vfwadd.w.nxv2f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f64_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv4f32( + , + , + i64); + +define @intrinsic_vfwadd.w_wv_nxv4f64_nxv4f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv4f64_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv4f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f64_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv8f32( + , + , + i64); + +define @intrinsic_vfwadd.w_wv_nxv8f64_nxv8f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wv_nxv8f64_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv8f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv8f32( + , + , + , + , + i64); + +define @intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f64_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv1f32.f16( + , + half, + i64); + +define @intrinsic_vfwadd.w_wf_nxv1f32_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv1f32.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( + , + , + half, + , + i64); + +define @intrinsic_vfwadd.w_mask_wf_nxv1f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv1f32.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv2f32.f16( + , + half, + i64); + +define @intrinsic_vfwadd.w_wf_nxv2f32_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv2f32.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( + , + , + half, + , + i64); + +define @intrinsic_vfwadd.w_mask_wf_nxv2f32_f16( %0, %1, half %2, %3, 
i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv2f32.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv4f32.f16( + , + half, + i64); + +define @intrinsic_vfwadd.w_wf_nxv4f32_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv4f32.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( + , + , + half, + , + i64); + +define @intrinsic_vfwadd.w_mask_wf_nxv4f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv4f32.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv8f32.f16( + , + half, + i64); + +define @intrinsic_vfwadd.w_wf_nxv8f32_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv8f32.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( + , + , + half, + , + i64); + +define @intrinsic_vfwadd.w_mask_wf_nxv8f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv8f32.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv16f32.f16( + , + half, + i64); + +define @intrinsic_vfwadd.w_wf_nxv16f32_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv16f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv16f32.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( + , + , + half, + , + i64); + +define @intrinsic_vfwadd.w_mask_wf_nxv16f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv16f32.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv1f64.f32( + , + float, + i64); + +define @intrinsic_vfwadd.w_wf_nxv1f64_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv1f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv1f64.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv1f64.f32( + , + , + float, + , + i64); + +define @intrinsic_vfwadd.w_mask_wf_nxv1f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e32,mf2,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv1f64.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv2f64.f32( + , + float, + i64); + +define @intrinsic_vfwadd.w_wf_nxv2f64_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv2f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv2f64.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv2f64.f32( + , + , + float, + , + i64); + +define @intrinsic_vfwadd.w_mask_wf_nxv2f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv2f64.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv4f64.f32( + , + float, + i64); + +define @intrinsic_vfwadd.w_wf_nxv4f64_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv4f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv4f64.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv4f64.f32( + , + , + float, + , + i64); + +define @intrinsic_vfwadd.w_mask_wf_nxv4f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv4f64.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.nxv8f64.f32( + , + float, + i64); + +define @intrinsic_vfwadd.w_wf_nxv8f64_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_wf_nxv8f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwadd.w.nxv8f64.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwadd.w.mask.nxv8f64.f32( + , + , + float, + , + i64); + +define @intrinsic_vfwadd.w_mask_wf_nxv8f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwadd.w.mask.nxv8f64.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll new file mode 100644 index 00000000000000..90056c1a123ae4 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv32.ll @@ -0,0 +1,401 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwsub.nxv1f16( + , + , + i32); + +define @intrinsic_vfwsub_vv_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv1f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare 
@llvm.riscv.vfwsub.mask.nxv1f16( + , + , + , + , + i32); + +define @intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv1f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv2f16( + , + , + i32); + +define @intrinsic_vfwsub_vv_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv2f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv2f16( + , + , + , + , + i32); + +define @intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv2f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv4f16( + , + , + i32); + +define @intrinsic_vfwsub_vv_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv4f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv4f16( + , + , + , + , + i32); + +define @intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv4f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv8f16( + , + , + i32); + +define @intrinsic_vfwsub_vv_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv8f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv8f16( + , + , + , + , + i32); + +define @intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv8f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv16f16( + , + , + i32); + +define @intrinsic_vfwsub_vv_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv16f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, 
{{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv16f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv1f16.f16( + , + half, + i32); + +define @intrinsic_vfwsub_vf_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv1f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv1f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfwsub_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv2f16.f16( + , + half, + i32); + +define @intrinsic_vfwsub_vf_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv2f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv2f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfwsub_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv4f16.f16( + , + half, + i32); + +define @intrinsic_vfwsub_vf_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv4f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv4f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfwsub_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv8f16.f16( + , + half, + i32); + +define @intrinsic_vfwsub_vf_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv8f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv8f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfwsub_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv16f16.f16( + , + half, + i32); + +define 
@intrinsic_vfwsub_vf_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv16f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv16f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfwsub_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll new file mode 100644 index 00000000000000..d0a5c1182fae2e --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub-rv64.ll @@ -0,0 +1,721 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwsub.nxv1f16( + , + , + i64); + +define @intrinsic_vfwsub_vv_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv1f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv1f16( + , + , + , + , + i64); + +define @intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv1f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv2f16( + , + , + i64); + +define @intrinsic_vfwsub_vv_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv2f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv2f16( + , + , + , + , + i64); + +define @intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv2f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv4f16( + , + , + i64); + +define @intrinsic_vfwsub_vv_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv4f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv4f16( + , + , + , + , + i64); + +define @intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv4f16( + %0, + %1, + %2, + %3, + 
i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv8f16( + , + , + i64); + +define @intrinsic_vfwsub_vv_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv8f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv8f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv16f16( + , + , + i64); + +define @intrinsic_vfwsub_vv_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv16f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv16f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv1f32( + , + , + i64); + +define @intrinsic_vfwsub_vv_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv1f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv1f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv2f32( + , + , + i64); + +define @intrinsic_vfwsub_vv_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv2f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv2f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv4f32( + , + , + i64); + +define @intrinsic_vfwsub_vv_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, 
{{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv4f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv4f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv8f32( + , + , + i64); + +define @intrinsic_vfwsub_vv_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vv_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv8f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv8f32( + , + , + , + , + i64); + +define @intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vv_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv8f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv1f16.f16( + , + half, + i64); + +define @intrinsic_vfwsub_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv1f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv1f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfwsub_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv2f16.f16( + , + half, + i64); + +define @intrinsic_vfwsub_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv2f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv2f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfwsub_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv4f16.f16( + , + half, + i64); + +define @intrinsic_vfwsub_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv4f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv4f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfwsub_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind 
{ +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv8f16.f16( + , + half, + i64); + +define @intrinsic_vfwsub_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv8f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv8f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfwsub_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv16f16.f16( + , + half, + i64); + +define @intrinsic_vfwsub_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv16f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv16f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfwsub_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv1f32.f32( + , + float, + i64); + +define @intrinsic_vfwsub_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv1f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv1f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfwsub_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv2f32.f32( + , + float, + i64); + +define @intrinsic_vfwsub_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv2f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv2f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfwsub_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = 
call @llvm.riscv.vfwsub.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv4f32.f32( + , + float, + i64); + +define @intrinsic_vfwsub_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv4f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv4f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfwsub_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.nxv8f32.f32( + , + float, + i64); + +define @intrinsic_vfwsub_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_vf_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.nxv8f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.mask.nxv8f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfwsub_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub_mask_vf_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll new file mode 100644 index 00000000000000..2cf5dd6bc439e0 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv32.ll @@ -0,0 +1,401 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwsub.w.nxv1f16( + , + , + i32); + +define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv1f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv1f16( + , + , + , + , + i32); + +define @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv1f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv2f16( + , + , + i32); + +define @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv2f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv2f16( + , + , + , + , + i32); + +define @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv2f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv4f16( + , + , + i32); + +define @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv4f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv4f16( + , + , + , + , + i32); + +define @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv4f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv8f16( + , + , + i32); + +define @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv8f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv8f16( + , + , + , + , + i32); + +define @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv8f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv16f16( + , + , + i32); + +define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv16f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv16f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv1f32.f16( + , + half, + i32); + +define @intrinsic_vfwsub.w_wf_nxv1f32_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv1f32.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv1f32.f16( + , + , + half, + , + i32); + +define @intrinsic_vfwsub.w_mask_wf_nxv1f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16( + %0, + 
%1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv2f32.f16( + , + half, + i32); + +define @intrinsic_vfwsub.w_wf_nxv2f32_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv2f32.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv2f32.f16( + , + , + half, + , + i32); + +define @intrinsic_vfwsub.w_mask_wf_nxv2f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv4f32.f16( + , + half, + i32); + +define @intrinsic_vfwsub.w_wf_nxv4f32_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv4f32.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv4f32.f16( + , + , + half, + , + i32); + +define @intrinsic_vfwsub.w_mask_wf_nxv4f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv8f32.f16( + , + half, + i32); + +define @intrinsic_vfwsub.w_wf_nxv8f32_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv8f32.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv8f32.f16( + , + , + half, + , + i32); + +define @intrinsic_vfwsub.w_mask_wf_nxv8f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv16f32.f16( + , + half, + i32); + +define @intrinsic_vfwsub.w_wf_nxv16f32_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv16f32.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv16f32.f16( + , + , + half, + , + i32); + +define @intrinsic_vfwsub.w_mask_wf_nxv16f32_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll 
b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll new file mode 100644 index 00000000000000..1755ec6bf3c2c2 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfwsub.w-rv64.ll @@ -0,0 +1,721 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfwsub.w.nxv1f16( + , + , + i64); + +define @intrinsic_vfwsub.w_wv_nxv1f32_nxv1f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f32_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv1f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv1f16( + , + , + , + , + i64); + +define @intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f32_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv1f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv2f16( + , + , + i64); + +define @intrinsic_vfwsub.w_wv_nxv2f32_nxv2f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f32_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv2f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv2f16( + , + , + , + , + i64); + +define @intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f32_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv2f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv4f16( + , + , + i64); + +define @intrinsic_vfwsub.w_wv_nxv4f32_nxv4f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f32_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv4f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv4f16( + , + , + , + , + i64); + +define @intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f32_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv4f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv8f16( + , + , + i64); + +define @intrinsic_vfwsub.w_wv_nxv8f32_nxv8f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv8f32_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv8f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f32_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vfwsub.w.mask.nxv8f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv16f16( + , + , + i64); + +define @intrinsic_vfwsub.w_wv_nxv16f32_nxv16f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv16f32_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv16f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv16f32_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv16f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv1f32( + , + , + i64); + +define @intrinsic_vfwsub.w_wv_nxv1f64_nxv1f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv1f64_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv1f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv1f64_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv2f32( + , + , + i64); + +define @intrinsic_vfwsub.w_wv_nxv2f64_nxv2f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv2f64_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv2f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv2f64_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv4f32( + , + , + i64); + +define @intrinsic_vfwsub.w_wv_nxv4f64_nxv4f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wv_nxv4f64_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv4f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv4f64_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv8f32( + , + , + i64); + +define @intrinsic_vfwsub.w_wv_nxv8f64_nxv8f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vfwsub.w_wv_nxv8f64_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv8f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv8f32( + , + , + , + , + i64); + +define @intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wv_nxv8f64_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv1f32.f16( + , + half, + i64); + +define @intrinsic_vfwsub.w_wf_nxv1f32_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv1f32.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv1f32.f16( + , + , + half, + , + i64); + +define @intrinsic_vfwsub.w_mask_wf_nxv1f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv1f32.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv2f32.f16( + , + half, + i64); + +define @intrinsic_vfwsub.w_wf_nxv2f32_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv2f32.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv2f32.f16( + , + , + half, + , + i64); + +define @intrinsic_vfwsub.w_mask_wf_nxv2f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv2f32.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv4f32.f16( + , + half, + i64); + +define @intrinsic_vfwsub.w_wf_nxv4f32_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv4f32.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv4f32.f16( + , + , + half, + , + i64); + +define @intrinsic_vfwsub.w_mask_wf_nxv4f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv4f32.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv8f32.f16( + , + half, + i64); + +define @intrinsic_vfwsub.w_wf_nxv8f32_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call 
@llvm.riscv.vfwsub.w.nxv8f32.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv8f32.f16( + , + , + half, + , + i64); + +define @intrinsic_vfwsub.w_mask_wf_nxv8f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv8f32.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv16f32.f16( + , + half, + i64); + +define @intrinsic_vfwsub.w_wf_nxv16f32_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv16f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv16f32.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv16f32.f16( + , + , + half, + , + i64); + +define @intrinsic_vfwsub.w_mask_wf_nxv16f32_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv16f32_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv16f32.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv1f64.f32( + , + float, + i64); + +define @intrinsic_vfwsub.w_wf_nxv1f64_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv1f64.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv1f64.f32( + , + , + float, + , + i64); + +define @intrinsic_vfwsub.w_mask_wf_nxv1f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv1f64.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv2f64.f32( + , + float, + i64); + +define @intrinsic_vfwsub.w_wf_nxv2f64_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv2f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv2f64.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv2f64.f32( + , + , + float, + , + i64); + +define @intrinsic_vfwsub.w_mask_wf_nxv2f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv2f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv2f64.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv4f64.f32( + , + float, + i64); + +define @intrinsic_vfwsub.w_wf_nxv4f64_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv4f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv4f64.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare 
@llvm.riscv.vfwsub.w.mask.nxv4f64.f32( + , + , + float, + , + i64); + +define @intrinsic_vfwsub.w_mask_wf_nxv4f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv4f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv4f64.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.nxv8f64.f32( + , + float, + i64); + +define @intrinsic_vfwsub.w_wf_nxv8f64_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv8f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfwsub.w.nxv8f64.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfwsub.w.mask.nxv8f64.f32( + , + , + float, + , + i64); + +define @intrinsic_vfwsub.w_mask_wf_nxv8f64_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv8f64_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfwsub.w.mask.nxv8f64.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +}
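Note: the scalable-vector type parameters (the "<vscale x N x ...>" tokens) in the declare and define lines above do not survive in this rendering of the patch, so the vector operand types appear to be missing. For reference, one wf/mask test pair from vfwsub.w-rv64.ll is written out below with the types spelled back in. The types are inferred from the intrinsic name mangling (nxv1f64.f32 gives a <vscale x 1 x double> result and wide source, a float scalar, and a <vscale x 1 x i1> mask), so treat this as a reconstruction sketch rather than the verbatim patch text.

; Unmasked .wf form: widening subtract of an f32 scalar from an already-wide
; f64 vector; the vsetvli operates at the narrow SEW (e32, mf2 for nxv1).
declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
  <vscale x 1 x double>,
  float,
  i64);

define <vscale x 1 x double> @intrinsic_vfwsub.w_wf_nxv1f64_f32(<vscale x 1 x double> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_wf_nxv1f64_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.nxv1f64.f32(
    <vscale x 1 x double> %0,
    float %1,
    i64 %2)

  ret <vscale x 1 x double> %a
}

; Masked .wf form: a merge operand and a <vscale x 1 x i1> mask are added,
; and the emitted instruction carries the v0.t predicate.
declare <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  float,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x double> @intrinsic_vfwsub.w_mask_wf_nxv1f64_f32(<vscale x 1 x double> %0, <vscale x 1 x double> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwsub.w_mask_wf_nxv1f64_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfwsub.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
  %a = call <vscale x 1 x double> @llvm.riscv.vfwsub.w.mask.nxv1f64.f32(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    float %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x double> %a
}

The same expansion should apply to every declare/define pair in these test files: the type parameters follow the mangled suffix of each intrinsic name, with the widening vv/vf forms taking narrow sources and the w forms taking one already-widened source.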