diff --git a/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td b/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
index 4a0272ca0a0a3..b1bb24e863b35 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCVXsf.td
@@ -162,6 +162,10 @@ let TargetPrefix = "riscv" in {
   defm "" : RISCVSFCustomVC_XVV<["x", "i", "v", "f"]>;
   defm "" : RISCVSFCustomVC_XVW<["x", "i", "v", "f"]>;
 
+  // XSfvfexp* and XSfvfexpa*
+  defm sf_vfexp : RISCVUnaryAA;
+  defm sf_vfexpa : RISCVUnaryAA;
+
   // XSfvqmaccdod
   def int_riscv_sf_vqmaccu_2x8x2 : RISCVSFCustomVMACC;
   def int_riscv_sf_vqmacc_2x8x2 : RISCVSFCustomVMACC;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
index 4104abd3b0219..5542766547cdf 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
@@ -218,11 +218,13 @@ let Predicates = [HasVendorXSfvcp], mayLoad = 0, mayStore = 0,
 }
 
 let Predicates = [HasVendorXSfvfexpAny], DecoderNamespace = "XSfvector" in {
-  def SF_VFEXP_V : VALUVs2<0b010011, 0b00111, OPFVV, "sf.vfexp.v">;
+  def SF_VFEXP_V : VALUVs2<0b010011, 0b00111, OPFVV, "sf.vfexp.v">,
+                   SchedUnaryMC<"WriteSF_VFExp", "ReadSF_VFExp">;
 }
 
 let Predicates = [HasVendorXSfvfexpa], DecoderNamespace = "XSfvector" in {
-  def SF_VFEXPA_V : VALUVs2<0b010011, 0b00110, OPFVV, "sf.vfexpa.v">;
+  def SF_VFEXPA_V : VALUVs2<0b010011, 0b00110, OPFVV, "sf.vfexpa.v">,
+                    SchedUnaryMC<"WriteSF_VFExpa", "ReadSF_VFExpa">;
 }
 
 let Predicates = [HasVendorXSfvqmaccdod], DecoderNamespace = "XSfvector",
@@ -487,6 +489,48 @@ let Predicates = [HasVendorXSfvfnrclipxfqf] in {
   defm SF_VFNRCLIP_X_F_QF : VPseudoSiFiveVFNRCLIP;
 }
 
+class VFExpSchedSEWSet<string mx, bit IsBF16, bit IsApprox> {
+  defvar BaseSet = SchedSEWSet<mx, isF=1>.val;
+  list<int> val = !if(IsBF16, !listremove(BaseSet, [32, 64]),
+                      !if(IsApprox, BaseSet, !listremove(BaseSet, [64])));
+}
+
+multiclass VPseudoVFExp_V<bit IsBF16 = 0, bit IsApprox = 0> {
+  defvar SchedSuffix = !if(IsApprox, "VFExpa", "VFExp");
+
+  foreach m = MxListF in {
+    defvar mx = m.MX;
+    foreach e = VFExpSchedSEWSet<mx, IsBF16, IsApprox>.val in {
+      let VLMul = m.value in {
+        def "_V_" # mx # "_E" # e
+            : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
+              SchedUnary<"WriteSF_" # SchedSuffix, "ReadSF_" # SchedSuffix,
+                         mx, e, forcePassthruRead=true>;
+        def "_V_" # mx # "_E" # e # "_MASK"
+            : VPseudoUnaryMask<m.vrclass, m.vrclass>,
+              RISCVMaskedPseudo<MaskIdx=2>,
+              SchedUnary<"WriteSF_" # SchedSuffix, "ReadSF_" # SchedSuffix,
+                         mx, e, forcePassthruRead=true>;
+      }
+    }
+  }
+}
+
+let Predicates = [HasVendorXSfvfbfexp16e], hasSideEffects = 0 in {
+  let AltFmtType = IS_ALTFMT in {
+    defm PseudoSF_VFEXP_ALT : VPseudoVFExp_V<IsBF16=1>;
+  }
+}
+
+let Predicates = [HasVendorXSfvfexpAnyFloat], hasSideEffects = 0 in {
+  let AltFmtType = IS_NOT_ALTFMT in {
+    defm PseudoSF_VFEXP : VPseudoVFExp_V;
+  }
+}
+
+let Predicates = [HasVendorXSfvfexpa], AltFmtType = IS_NOT_ALTFMT in {
+  defm PseudoSF_VFEXPA : VPseudoVFExp_V<IsApprox=1>;
+}
+
 // SDNode
 def SDT_SF_VC_V_X : SDTypeProfile<1, 4, [SDTCisVec<0>, SDTCisVT<1, XLenVT>,
@@ -893,3 +937,36 @@ let Predicates = [HasVendorXSfcease] in {
     let rs2 = 0b00101;
   }
 }
+
+let Predicates = [HasVendorXSfvfbfexp16e] in {
+  defm : VPatUnaryV_V<"int_riscv_sf_vfexp", "PseudoSF_VFEXP_ALT",
+                      AllBF16Vectors, isSEWAware=1>;
+}
+
+let Predicates = [HasVendorXSfvfexp16e] in {
+  defm : VPatUnaryV_V<"int_riscv_sf_vfexp", "PseudoSF_VFEXP",
+                      [VF16MF4, VF16MF2, VF16M1, VF16M2, VF16M4, VF16M8],
+                      isSEWAware=1>;
+}
+
+let Predicates = [HasVendorXSfvfexp32e] in {
+  defm : VPatUnaryV_V<"int_riscv_sf_vfexp", "PseudoSF_VFEXP",
+                      [VF32MF2, VF32M1, VF32M2, VF32M4, VF32M8], isSEWAware=1>;
+}
+
+let Predicates = [HasVendorXSfvfexpa] in {
+  defm : VPatUnaryV_V<"int_riscv_sf_vfexpa", "PseudoSF_VFEXPA",
+                      [VF32MF2, VF32M1, VF32M2, VF32M4, VF32M8], isSEWAware=1>;
+}
+
+let Predicates = [HasVendorXSfvfexpa, HasVInstructionsF16] in {
+  defm : VPatUnaryV_V<"int_riscv_sf_vfexpa", "PseudoSF_VFEXPA",
+                      [VF16MF4, VF16MF2, VF16M1, VF16M2, VF16M4, VF16M8],
+                      isSEWAware=1>;
+}
+
+let Predicates = [HasVendorXSfvfexpa64e] in {
+  defm : VPatUnaryV_V<"int_riscv_sf_vfexpa", "PseudoSF_VFEXPA",
+                      [VF64M1, VF64M2, VF64M4, VF64M8], isSEWAware=1>;
+}
diff --git a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
index 637d61fe96b47..36a2f46416674 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
@@ -1588,6 +1588,10 @@ multiclass SiFive7SchedResources
+defm : UnsupportedSchedXSfvfexp;
+defm : UnsupportedSchedXSfvfexpa;
diff --git a/llvm/lib/Target/RISCV/RISCVScheduleXSf.td b/llvm/lib/Target/RISCV/RISCVScheduleXSf.td
--- a/llvm/lib/Target/RISCV/RISCVScheduleXSf.td
+++ b/llvm/lib/Target/RISCV/RISCVScheduleXSf.td
 defm : LMULReadAdvance<"ReadSF_VFWMACC_QQQ", 0>;
 } // Unsupported = true
 }
+
+defm "" : LMULSEWSchedWritesF<"WriteSF_VFExp">;
+defm "" : LMULSEWSchedReadsF<"ReadSF_VFExp">;
+
+multiclass UnsupportedSchedXSfvfexp {
+let Unsupported = true in {
+defm : LMULSEWWriteResF<"WriteSF_VFExp", []>;
+defm : LMULSEWReadAdvanceF<"ReadSF_VFExp", 0>;
+} // Unsupported = true
+}
+
+defm "" : LMULSEWSchedWritesF<"WriteSF_VFExpa">;
+defm "" : LMULSEWSchedReadsF<"ReadSF_VFExpa">;
+
+multiclass UnsupportedSchedXSfvfexpa {
+let Unsupported = true in {
+defm : LMULSEWWriteResF<"WriteSF_VFExpa", []>;
+defm : LMULSEWReadAdvanceF<"ReadSF_VFExpa", 0>;
+} // Unsupported = true
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vfbfexp16e.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vfbfexp16e.ll
new file mode 100644
index 0000000000000..b12d77b96031e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sf_vfbfexp16e.ll
@@ -0,0 +1,191 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfbfmin,+xsfvfbfexp16e \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfbfmin,+xsfvfbfexp16e \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+define <vscale x 1 x bfloat> @intrinsic_sf_vfexp_v_nxv1bf16(<vscale x 1 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_v_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf4, ta, ma
+; CHECK-NEXT:    sf.vfexp.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.sf.vfexp.nxv1bf16(
+    <vscale x 1 x bfloat> undef,
+    <vscale x 1 x bfloat> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_sf_vfexp_v_nxv2bf16(<vscale x 2 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_v_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf2, ta, ma
+; CHECK-NEXT:    sf.vfexp.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.sf.vfexp.nxv2bf16(
+    <vscale x 2 x bfloat> undef,
+    <vscale x 2 x bfloat> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_sf_vfexp_v_nxv4bf16(<vscale x 4 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_v_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, ta, ma
+; CHECK-NEXT:    sf.vfexp.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.sf.vfexp.nxv4bf16(
+    <vscale x 4 x bfloat> undef,
+    <vscale x 4 x bfloat> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_sf_vfexp_v_nxv8bf16(<vscale x 8 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_v_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, ta, ma
+; CHECK-NEXT:    sf.vfexp.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.sf.vfexp.nxv8bf16(
+    <vscale x 8 x bfloat> undef,
+    <vscale x 8 x bfloat> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_sf_vfexp_v_nxv16bf16(<vscale x 16 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_v_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, ta, ma
+; CHECK-NEXT:    sf.vfexp.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.sf.vfexp.nxv16bf16(
+    <vscale x 16 x bfloat> undef,
+    <vscale x 16 x bfloat> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 32 x bfloat> @intrinsic_sf_vfexp_v_nxv32bf16(<vscale x 32 x bfloat> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_v_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m8, ta, ma
+; CHECK-NEXT:    sf.vfexp.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.sf.vfexp.nxv32bf16(
+    <vscale x 32 x bfloat> undef,
+    <vscale x 32 x bfloat> %0,
+    iXLen %1)
+
+  ret <vscale x 32 x bfloat> %a
+}
+
+define <vscale x 1 x bfloat> @intrinsic_sf_vfexp_mask_v_nxv1bf16(<vscale x 1 x bfloat> %0, <vscale x 1 x bfloat> %1, <vscale x 1 x i1> %m, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_mask_v_nxv1bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf4, tu, mu
+; CHECK-NEXT:    sf.vfexp.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv1bf16(
+    <vscale x 1 x bfloat> %0,
+    <vscale x 1 x bfloat> %1,
+    <vscale x 1 x i1> %m,
+    iXLen %2, iXLen 0)
+
+  ret <vscale x 1 x bfloat> %a
+}
+
+define <vscale x 2 x bfloat> @intrinsic_sf_vfexp_mask_v_nxv2bf16(<vscale x 2 x bfloat> %0, <vscale x 2 x bfloat> %1, <vscale x 2 x i1> %m, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_mask_v_nxv2bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, mf2, tu, mu
+; CHECK-NEXT:    sf.vfexp.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv2bf16(
+    <vscale x 2 x bfloat> %0,
+    <vscale x 2 x bfloat> %1,
+    <vscale x 2 x i1> %m,
+    iXLen %2, iXLen 0)
+
+  ret <vscale x 2 x bfloat> %a
+}
+
+define <vscale x 4 x bfloat> @intrinsic_sf_vfexp_mask_v_nxv4bf16(<vscale x 4 x bfloat> %0, <vscale x 4 x bfloat> %1, <vscale x 4 x i1> %m, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_mask_v_nxv4bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m1, tu, mu
+; CHECK-NEXT:    sf.vfexp.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv4bf16(
+    <vscale x 4 x bfloat> %0,
+    <vscale x 4 x bfloat> %1,
+    <vscale x 4 x i1> %m,
+    iXLen %2, iXLen 0)
+
+  ret <vscale x 4 x bfloat> %a
+}
+
+define <vscale x 8 x bfloat> @intrinsic_sf_vfexp_mask_v_nxv8bf16(<vscale x 8 x bfloat> %0, <vscale x 8 x bfloat> %1, <vscale x 8 x i1> %m, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_mask_v_nxv8bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m2, tu, mu
+; CHECK-NEXT:    sf.vfexp.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv8bf16(
+    <vscale x 8 x bfloat> %0,
+    <vscale x 8 x bfloat> %1,
+    <vscale x 8 x i1> %m,
+    iXLen %2, iXLen 0)
+
+  ret <vscale x 8 x bfloat> %a
+}
+
+define <vscale x 16 x bfloat> @intrinsic_sf_vfexp_mask_v_nxv16bf16(<vscale x 16 x bfloat> %0, <vscale x 16 x bfloat> %1, <vscale x 16 x i1> %m, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_mask_v_nxv16bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m4, tu, mu
+; CHECK-NEXT:    sf.vfexp.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv16bf16(
+    <vscale x 16 x bfloat> %0,
+    <vscale x 16 x bfloat> %1,
+    <vscale x 16 x i1> %m,
+    iXLen %2, iXLen 0)
+
+  ret <vscale x 16 x bfloat> %a
+}
+
+define <vscale x 32 x bfloat> @intrinsic_sf_vfexp_mask_v_nxv32bf16(<vscale x 32 x bfloat> %0, <vscale x 32 x bfloat> %1, <vscale x 32 x i1> %m, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_mask_v_nxv32bf16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16alt, m8, tu, mu
+; CHECK-NEXT:    sf.vfexp.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x bfloat> @llvm.riscv.sf.vfexp.mask.nxv32bf16(
+    <vscale x 32 x bfloat> %0,
+    <vscale x 32 x bfloat> %1,
+    <vscale x 32 x i1> %m,
+    iXLen %2, iXLen 0)
+
+  ret <vscale x 32 x bfloat> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vfexp16e.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vfexp16e.ll
new file mode 100644
index 0000000000000..d797d1d05a858
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sf_vfexp16e.ll
@@ -0,0 +1,191 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+xsfvfexp16e \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvfexp16e \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+define <vscale x 1 x half> @intrinsic_sf_vfexp_v_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_v_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vfexp.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.sf.vfexp.nxv1f16(
+    <vscale x 1 x half> undef,
+    <vscale x 1 x half> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x half> %a
+}
+
+define <vscale x 2 x half> @intrinsic_sf_vfexp_v_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_v_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vfexp.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.sf.vfexp.nxv2f16(
+    <vscale x 2 x half> undef,
+    <vscale x 2 x half> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x half> %a
+}
+
+define <vscale x 4 x half> @intrinsic_sf_vfexp_v_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_v_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vfexp.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.sf.vfexp.nxv4f16(
+    <vscale x 4 x half> undef,
+    <vscale x 4 x half> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x half> %a
+}
+
+define <vscale x 8 x half> @intrinsic_sf_vfexp_v_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_v_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vfexp.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.sf.vfexp.nxv8f16(
+    <vscale x 8 x half> undef,
+    <vscale x 8 x half> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x half> %a
+}
+
+define <vscale x 16 x half> @intrinsic_sf_vfexp_v_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_v_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vfexp.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.sf.vfexp.nxv16f16(
+    <vscale x 16 x half> undef,
+    <vscale x 16 x half> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x half> %a
+}
+
+define <vscale x 32 x half> @intrinsic_sf_vfexp_v_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_v_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vfexp.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x half> @llvm.riscv.sf.vfexp.nxv32f16(
+    <vscale x 32 x half> undef,
+    <vscale x 32 x half> %0,
+    iXLen %1)
+
+  ret <vscale x 32 x half> %a
+}
+
+define <vscale x 1 x half> @intrinsic_sf_vfexp_mask_v_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %m, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_mask_v_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    sf.vfexp.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.sf.vfexp.mask.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %m,
+    iXLen %2, iXLen 0)
+
+  ret <vscale x 1 x half> %a
+}
+
+define <vscale x 2 x half> @intrinsic_sf_vfexp_mask_v_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %m, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_mask_v_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    sf.vfexp.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.sf.vfexp.mask.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x i1> %m,
+    iXLen %2, iXLen 0)
+
+  ret <vscale x 2 x half> %a
+}
+
+define <vscale x 4 x half> @intrinsic_sf_vfexp_mask_v_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %m, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_mask_v_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    sf.vfexp.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.sf.vfexp.mask.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x i1> %m,
+    iXLen %2, iXLen 0)
+
+  ret <vscale x 4 x half> %a
+}
+
+define <vscale x 8 x half> @intrinsic_sf_vfexp_mask_v_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %m, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_mask_v_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    sf.vfexp.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.sf.vfexp.mask.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x i1> %m,
+    iXLen %2, iXLen 0)
+
+  ret <vscale x 8 x half> %a
+}
+
+define <vscale x 16 x half> @intrinsic_sf_vfexp_mask_v_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %m, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_mask_v_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    sf.vfexp.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x half> @llvm.riscv.sf.vfexp.mask.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x i1> %m,
+    iXLen %2, iXLen 0)
+
+  ret <vscale x 16 x half> %a
+}
+
+define <vscale x 32 x half> @intrinsic_sf_vfexp_mask_v_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %m, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_mask_v_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
+; CHECK-NEXT:    sf.vfexp.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 32 x half> @llvm.riscv.sf.vfexp.mask.nxv32f16(
+    <vscale x 32 x half> %0,
+    <vscale x 32 x half> %1,
+    <vscale x 32 x i1> %m,
+    iXLen %2, iXLen 0)
+
+  ret <vscale x 32 x half> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vfexp32e.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vfexp32e.ll
new file mode 100644
index 0000000000000..1cf29fe61fdad
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sf_vfexp32e.ll
@@ -0,0 +1,160 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+xsfvfexp32e \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+xsfvfexp32e \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+define <vscale x 1 x float> @intrinsic_sf_vfexp_v_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_v_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vfexp.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.sf.vfexp.nxv1f32(
+    <vscale x 1 x float> undef,
+    <vscale x 1 x float> %0,
+    iXLen %1)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_sf_vfexp_v_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_v_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vfexp.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.sf.vfexp.nxv2f32(
+    <vscale x 2 x float> undef,
+    <vscale x 2 x float> %0,
+    iXLen %1)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_sf_vfexp_v_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_v_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vfexp.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.sf.vfexp.nxv4f32(
+    <vscale x 4 x float> undef,
+    <vscale x 4 x float> %0,
+    iXLen %1)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_sf_vfexp_v_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_v_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vfexp.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.sf.vfexp.nxv8f32(
+    <vscale x 8 x float> undef,
+    <vscale x 8 x float> %0,
+    iXLen %1)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_sf_vfexp_v_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_v_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vfexp.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.sf.vfexp.nxv16f32(
+    <vscale x 16 x float> undef,
+    <vscale x 16 x float> %0,
+    iXLen %1)
+
+  ret <vscale x 16 x float> %a
+}
+
+define <vscale x 1 x float> @intrinsic_sf_vfexp_mask_v_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %m, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_mask_v_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    sf.vfexp.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.sf.vfexp.mask.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %m,
+    iXLen %2, iXLen 0)
+
+  ret <vscale x 1 x float> %a
+}
+
+define <vscale x 2 x float> @intrinsic_sf_vfexp_mask_v_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %m, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_mask_v_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    sf.vfexp.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.sf.vfexp.mask.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x i1> %m,
+    iXLen %2, iXLen 0)
+
+  ret <vscale x 2 x float> %a
+}
+
+define <vscale x 4 x float> @intrinsic_sf_vfexp_mask_v_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %m, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_mask_v_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    sf.vfexp.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.sf.vfexp.mask.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i1> %m,
+    iXLen %2, iXLen 0)
+
+  ret <vscale x 4 x float> %a
+}
+
+define <vscale x 8 x float> @intrinsic_sf_vfexp_mask_v_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %m, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_mask_v_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    sf.vfexp.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.sf.vfexp.mask.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %m,
+    iXLen %2, iXLen 0)
+
+  ret <vscale x 8 x float> %a
+}
+
+define <vscale x 16 x float> @intrinsic_sf_vfexp_mask_v_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %m, iXLen %2) nounwind {
+; CHECK-LABEL: intrinsic_sf_vfexp_mask_v_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
+; CHECK-NEXT:    sf.vfexp.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %a = call <vscale x 16 x float> @llvm.riscv.sf.vfexp.mask.nxv16f32(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 16 x i1> %m,
+    iXLen %2, iXLen 0)
+
+  ret <vscale x 16 x float> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vfexpa.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vfexpa.ll
new file mode 100644
index 0000000000000..82ded43a7fac8
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sf_vfexpa.ll
@@ -0,0 +1,335 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+zve64f,+zvfh,+xsfvfexpa \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+zve64f,+zvfh,+xsfvfexpa \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+define <vscale x 1 x float> @test_intrinsic_sf_vfexpa_v_nxv1f32(<vscale x 1 x float> %0, iXLen %1) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    sf.vfexpa.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 1 x float> @llvm.riscv.sf.vfexpa.nxv1f32(
+    <vscale x 1 x float> undef,
+    <vscale x 1 x float> %0,
+    iXLen %1)
+  ret <vscale x 1 x float> %f
+}
+
+define <vscale x 2 x float> @test_intrinsic_sf_vfexpa_v_nxv2f32(<vscale x 2 x float> %0, iXLen %1) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    sf.vfexpa.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 2 x float> @llvm.riscv.sf.vfexpa.nxv2f32(
+    <vscale x 2 x float> undef,
+    <vscale x 2 x float> %0,
+    iXLen %1)
+  ret <vscale x 2 x float> %f
+}
+
+define <vscale x 4 x float> @test_intrinsic_sf_vfexpa_v_nxv4f32(<vscale x 4 x float> %0, iXLen %1) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    sf.vfexpa.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 4 x float> @llvm.riscv.sf.vfexpa.nxv4f32(
+    <vscale x 4 x float> undef,
+    <vscale x 4 x float> %0,
+    iXLen %1)
+  ret <vscale x 4 x float> %f
+}
+
+define <vscale x 8 x float> @test_intrinsic_sf_vfexpa_v_nxv8f32(<vscale x 8 x float> %0, iXLen %1) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    sf.vfexpa.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 8 x float> @llvm.riscv.sf.vfexpa.nxv8f32(
+    <vscale x 8 x float> undef,
+    <vscale x 8 x float> %0,
+    iXLen %1)
+  ret <vscale x 8 x float> %f
+}
+
+define <vscale x 16 x float> @test_intrinsic_sf_vfexpa_v_nxv16f32(<vscale x 16 x float> %0, iXLen %1) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT:    sf.vfexpa.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 16 x float> @llvm.riscv.sf.vfexpa.nxv16f32(
+    <vscale x 16 x float> undef,
+    <vscale x 16 x float> %0,
+    iXLen %1)
+  ret <vscale x 16 x float> %f
+}
+
+define <vscale x 1 x float> @test_intrinsic_sf_vfexpa_v_mask_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %m, iXLen %vl) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_mask_nxv1f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
+; CHECK-NEXT:    sf.vfexpa.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 1 x float> @llvm.riscv.sf.vfexpa.mask.nxv1f32(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %m,
+    iXLen %vl,
+    iXLen 0)
+  ret <vscale x 1 x float> %f
+}
+
+define <vscale x 2 x float> @test_intrinsic_sf_vfexpa_v_mask_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %m, iXLen %vl) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_mask_nxv2f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
+; CHECK-NEXT:    sf.vfexpa.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 2 x float> @llvm.riscv.sf.vfexpa.mask.nxv2f32(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x i1> %m,
+    iXLen %vl,
+    iXLen 0)
+  ret <vscale x 2 x float> %f
+}
+
+define <vscale x 4 x float> @test_intrinsic_sf_vfexpa_v_mask_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %m, iXLen %vl) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_mask_nxv4f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
+; CHECK-NEXT:    sf.vfexpa.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 4 x float> @llvm.riscv.sf.vfexpa.mask.nxv4f32(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i1> %m,
+    iXLen %vl,
+    iXLen 0)
+  ret <vscale x 4 x float> %f
+}
+
+define <vscale x 8 x float> @test_intrinsic_sf_vfexpa_v_mask_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %m, iXLen %vl) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_mask_nxv8f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
+; CHECK-NEXT:    sf.vfexpa.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 8 x float> @llvm.riscv.sf.vfexpa.mask.nxv8f32(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %m,
+    iXLen %vl,
+    iXLen 0)
+  ret <vscale x 8 x float> %f
+}
+
+define <vscale x 16 x float> @test_intrinsic_sf_vfexpa_v_mask_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %m, iXLen %vl) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_mask_nxv16f32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
+; CHECK-NEXT:    sf.vfexpa.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 16 x float> @llvm.riscv.sf.vfexpa.mask.nxv16f32(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 16 x i1> %m,
+    iXLen %vl,
+    iXLen 0)
+  ret <vscale x 16 x float> %f
+}
+
+define <vscale x 1 x half> @test_intrinsic_sf_vfexpa_v_nxv1f16(<vscale x 1 x half> %0, iXLen %1) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    sf.vfexpa.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 1 x half> @llvm.riscv.sf.vfexpa.nxv1f16(
+    <vscale x 1 x half> undef,
+    <vscale x 1 x half> %0,
+    iXLen %1)
+  ret <vscale x 1 x half> %f
+}
+
+define <vscale x 2 x half> @test_intrinsic_sf_vfexpa_v_nxv2f16(<vscale x 2 x half> %0, iXLen %1) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    sf.vfexpa.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 2 x half> @llvm.riscv.sf.vfexpa.nxv2f16(
+    <vscale x 2 x half> undef,
+    <vscale x 2 x half> %0,
+    iXLen %1)
+  ret <vscale x 2 x half> %f
+}
+
+define <vscale x 4 x half> @test_intrinsic_sf_vfexpa_v_nxv4f16(<vscale x 4 x half> %0, iXLen %1) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    sf.vfexpa.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 4 x half> @llvm.riscv.sf.vfexpa.nxv4f16(
+    <vscale x 4 x half> undef,
+    <vscale x 4 x half> %0,
+    iXLen %1)
+  ret <vscale x 4 x half> %f
+}
+
+define <vscale x 8 x half> @test_intrinsic_sf_vfexpa_v_nxv8f16(<vscale x 8 x half> %0, iXLen %1) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    sf.vfexpa.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 8 x half> @llvm.riscv.sf.vfexpa.nxv8f16(
+    <vscale x 8 x half> undef,
+    <vscale x 8 x half> %0,
+    iXLen %1)
+  ret <vscale x 8 x half> %f
+}
+
+define <vscale x 16 x half> @test_intrinsic_sf_vfexpa_v_nxv16f16(<vscale x 16 x half> %0, iXLen %1) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    sf.vfexpa.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 16 x half> @llvm.riscv.sf.vfexpa.nxv16f16(
+    <vscale x 16 x half> undef,
+    <vscale x 16 x half> %0,
+    iXLen %1)
+  ret <vscale x 16 x half> %f
+}
+
+define <vscale x 32 x half> @test_intrinsic_sf_vfexpa_v_nxv32f16(<vscale x 32 x half> %0, iXLen %1) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT:    sf.vfexpa.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 32 x half> @llvm.riscv.sf.vfexpa.nxv32f16(
+    <vscale x 32 x half> undef,
+    <vscale x 32 x half> %0,
+    iXLen %1)
+  ret <vscale x 32 x half> %f
+}
+
+define <vscale x 1 x half> @test_intrinsic_sf_vfexpa_v_mask_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %m, iXLen %vl) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_mask_nxv1f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
+; CHECK-NEXT:    sf.vfexpa.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 1 x half> @llvm.riscv.sf.vfexpa.mask.nxv1f16(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half> %1,
+    <vscale x 1 x i1> %m,
+    iXLen %vl,
+    iXLen 0)
+  ret <vscale x 1 x half> %f
+}
+
+define <vscale x 2 x half> @test_intrinsic_sf_vfexpa_v_mask_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %m, iXLen %vl) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_mask_nxv2f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
+; CHECK-NEXT:    sf.vfexpa.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 2 x half> @llvm.riscv.sf.vfexpa.mask.nxv2f16(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half> %1,
+    <vscale x 2 x i1> %m,
+    iXLen %vl,
+    iXLen 0)
+  ret <vscale x 2 x half> %f
+}
+
+define <vscale x 4 x half> @test_intrinsic_sf_vfexpa_v_mask_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %m, iXLen %vl) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_mask_nxv4f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
+; CHECK-NEXT:    sf.vfexpa.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 4 x half> @llvm.riscv.sf.vfexpa.mask.nxv4f16(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half> %1,
+    <vscale x 4 x i1> %m,
+    iXLen %vl,
+    iXLen 0)
+  ret <vscale x 4 x half> %f
+}
+
+define <vscale x 8 x half> @test_intrinsic_sf_vfexpa_v_mask_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %m, iXLen %vl) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_mask_nxv8f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
+; CHECK-NEXT:    sf.vfexpa.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 8 x half> @llvm.riscv.sf.vfexpa.mask.nxv8f16(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half> %1,
+    <vscale x 8 x i1> %m,
+    iXLen %vl,
+    iXLen 0)
+  ret <vscale x 8 x half> %f
+}
+
+define <vscale x 16 x half> @test_intrinsic_sf_vfexpa_v_mask_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %m, iXLen %vl) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_mask_nxv16f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT:    sf.vfexpa.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 16 x half> @llvm.riscv.sf.vfexpa.mask.nxv16f16(
+    <vscale x 16 x half> %0,
+    <vscale x 16 x half> %1,
+    <vscale x 16 x i1> %m,
+    iXLen %vl,
+    iXLen 0)
+  ret <vscale x 16 x half> %f
+}
+
+define <vscale x 32 x half> @test_intrinsic_sf_vfexpa_v_mask_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %m, iXLen %vl) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_mask_nxv32f16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
+; CHECK-NEXT:    sf.vfexpa.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 32 x half> @llvm.riscv.sf.vfexpa.mask.nxv32f16(
+    <vscale x 32 x half> %0,
+    <vscale x 32 x half> %1,
+    <vscale x 32 x i1> %m,
+    iXLen %vl,
+    iXLen 0)
+  ret <vscale x 32 x half> %f
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vfexpa64e.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vfexpa64e.ll
new file mode 100644
index 0000000000000..59614bdf2b20b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sf_vfexpa64e.ll
@@ -0,0 +1,125 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+xsfvfexpa64e \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xsfvfexpa64e \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK
+
+define <vscale x 1 x double> @test_intrinsic_sf_vfexpa_v_nxv1f64(<vscale x 1 x double> %0, iXLen %1) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    sf.vfexpa.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 1 x double> @llvm.riscv.sf.vfexpa.nxv1f64(
+    <vscale x 1 x double> undef,
+    <vscale x 1 x double> %0,
+    iXLen %1)
+  ret <vscale x 1 x double> %f
+}
+
+define <vscale x 2 x double> @test_intrinsic_sf_vfexpa_v_nxv2f64(<vscale x 2 x double> %0, iXLen %1) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    sf.vfexpa.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 2 x double> @llvm.riscv.sf.vfexpa.nxv2f64(
+    <vscale x 2 x double> undef,
+    <vscale x 2 x double> %0,
+    iXLen %1)
+  ret <vscale x 2 x double> %f
+}
+
+define <vscale x 4 x double> @test_intrinsic_sf_vfexpa_v_nxv4f64(<vscale x 4 x double> %0, iXLen %1) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    sf.vfexpa.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 4 x double> @llvm.riscv.sf.vfexpa.nxv4f64(
+    <vscale x 4 x double> undef,
+    <vscale x 4 x double> %0,
+    iXLen %1)
+  ret <vscale x 4 x double> %f
+}
+
+define <vscale x 8 x double> @test_intrinsic_sf_vfexpa_v_nxv8f64(<vscale x 8 x double> %0, iXLen %1) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT:    sf.vfexpa.v v8, v8
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 8 x double> @llvm.riscv.sf.vfexpa.nxv8f64(
+    <vscale x 8 x double> undef,
+    <vscale x 8 x double> %0,
+    iXLen %1)
+  ret <vscale x 8 x double> %f
+}
+
+define <vscale x 1 x double> @test_intrinsic_sf_vfexpa_v_mask_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %m, iXLen %vl) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_mask_nxv1f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
+; CHECK-NEXT:    sf.vfexpa.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 1 x double> @llvm.riscv.sf.vfexpa.mask.nxv1f64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x i1> %m,
+    iXLen %vl,
+    iXLen 0)
+  ret <vscale x 1 x double> %f
+}
+
+define <vscale x 2 x double> @test_intrinsic_sf_vfexpa_v_mask_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %m, iXLen %vl) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_mask_nxv2f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
+; CHECK-NEXT:    sf.vfexpa.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 2 x double> @llvm.riscv.sf.vfexpa.mask.nxv2f64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x i1> %m,
+    iXLen %vl,
+    iXLen 0)
+  ret <vscale x 2 x double> %f
+}
+
+define <vscale x 4 x double> @test_intrinsic_sf_vfexpa_v_mask_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %m, iXLen %vl) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_mask_nxv4f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
+; CHECK-NEXT:    sf.vfexpa.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 4 x double> @llvm.riscv.sf.vfexpa.mask.nxv4f64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x i1> %m,
+    iXLen %vl,
+    iXLen 0)
+  ret <vscale x 4 x double> %f
+}
+
+define <vscale x 8 x double> @test_intrinsic_sf_vfexpa_v_mask_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %m, iXLen %vl) {
+; CHECK-LABEL: test_intrinsic_sf_vfexpa_v_mask_nxv8f64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
+; CHECK-NEXT:    sf.vfexpa.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+entry:
+  %f = call <vscale x 8 x double> @llvm.riscv.sf.vfexpa.mask.nxv8f64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 8 x i1> %m,
+    iXLen %vl,
+    iXLen 0)
+  ret <vscale x 8 x double> %f
+}
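
Reviewer note: every test above passes undef as the passthru and 0 as the policy operand, so the operand layout can be easy to miss. The sketch below is illustrative only (the function name, constant choices, and use of i64 for iXLen on riscv64 are mine, not part of the patch); it shows the masked intrinsic with a live passthru, where policy 0 selects tail-undisturbed, mask-undisturbed (the tu, mu vsetvli the tests check for), so masked-off and tail elements are taken from the passthru:

; Illustrative sketch, not part of the patch. riscv64 shown, so iXLen is i64.
define <vscale x 2 x float> @demo_masked_exp(<vscale x 2 x float> %passthru,
                                             <vscale x 2 x float> %src,
                                             <vscale x 2 x i1> %mask,
                                             i64 %vl) {
entry:
  ; Operands: passthru, source, mask, vl, policy. Policy 0 = tu, mu, so
  ; elements where %mask is 0, and elements past %vl, come from %passthru.
  %r = call <vscale x 2 x float> @llvm.riscv.sf.vfexpa.mask.nxv2f32(
    <vscale x 2 x float> %passthru,
    <vscale x 2 x float> %src,
    <vscale x 2 x i1> %mask,
    i64 %vl, i64 0)
  ret <vscale x 2 x float> %r
}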