diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 4822d5e5745e84..81517a0255fde7 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -771,6 +771,15 @@ let TargetPrefix = "riscv" in {
   defm vfwcvt_rtz_x_f_v : RISCVConversion;
   defm vfwcvt_f_f_v : RISCVConversion;
 
+  defm vfncvt_f_xu_w : RISCVConversion;
+  defm vfncvt_f_x_w : RISCVConversion;
+  defm vfncvt_xu_f_w : RISCVConversion;
+  defm vfncvt_x_f_w : RISCVConversion;
+  defm vfncvt_rtz_xu_f_w : RISCVConversion;
+  defm vfncvt_rtz_x_f_w : RISCVConversion;
+  defm vfncvt_f_f_w : RISCVConversion;
+  defm vfncvt_rod_f_f_w : RISCVConversion;
+
   // Output: (vector)
   // Input: (mask type input, vl)
   def int_riscv_viota : Intrinsic<[llvm_anyvector_ty],
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 42af1d4fc8b097..44069d732799e0 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1314,6 +1314,12 @@ multiclass VPseudoConversionW_V {
   defm _V : VPseudoConversion<m.wvrclass, m.vrclass, m, constraint>;
 }
 
+multiclass VPseudoConversionV_W {
+  defvar constraint = "@earlyclobber $rd";
+  foreach m = MxList.m[0-5] in
+    defm _W : VPseudoConversion<m.vrclass, m.wvrclass, m, constraint>;
+}
+
 //===----------------------------------------------------------------------===//
 // Helpers to define the intrinsic patterns.
 //===----------------------------------------------------------------------===//
@@ -2290,6 +2296,42 @@ multiclass VPatConversionWF_VF {
   }
 }
 
+multiclass VPatConversionVI_WF<string intrinsic, string instruction>
+{
+  foreach vtiToWti = AllWidenableIntToFloatVectors in
+  {
+    defvar vti = vtiToWti.Vti;
+    defvar fwti = vtiToWti.Wti;
+
+    defm : VPatConversion<intrinsic, instruction, "W",
+                          vti.Vector, fwti.Vector, vti.Mask, vti.SEW, vti.LMul, vti.RegClass, fwti.RegClass>;
+  }
+}
+
+multiclass VPatConversionVF_WI<string intrinsic, string instruction>
+{
+  foreach fvtiToFWti = AllWidenableFloatVectors in
+  {
+    defvar fvti = fvtiToFWti.Vti;
+    defvar iwti = GetIntVTypeInfo<fvtiToFWti.Wti>.Vti;
+
+    defm : VPatConversion<intrinsic, instruction, "W",
+                          fvti.Vector, iwti.Vector, fvti.Mask, fvti.SEW, fvti.LMul, fvti.RegClass, iwti.RegClass>;
+  }
+}
+
+multiclass VPatConversionVF_WF<string intrinsic, string instruction>
+{
+  foreach fvtiToFWti = AllWidenableFloatVectors in
+  {
+    defvar fvti = fvtiToFWti.Vti;
+    defvar fwti = fvtiToFWti.Wti;
+
+    defm : VPatConversion<intrinsic, instruction, "W",
+                          fvti.Vector, fwti.Vector, fvti.Mask, fvti.SEW, fvti.LMul, fvti.RegClass, fwti.RegClass>;
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // Pseudo instructions and patterns.
 //===----------------------------------------------------------------------===//
@@ -2636,6 +2678,18 @@ defm PseudoVFWCVT_RTZ_X_F : VPseudoConversionW_V;
 defm PseudoVFWCVT_F_XU : VPseudoConversionW_V;
 defm PseudoVFWCVT_F_X : VPseudoConversionW_V;
 defm PseudoVFWCVT_F_F : VPseudoConversionW_V;
+
+//===----------------------------------------------------------------------===//
+// 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVFNCVT_XU_F : VPseudoConversionV_W;
+defm PseudoVFNCVT_X_F : VPseudoConversionV_W;
+defm PseudoVFNCVT_RTZ_XU_F : VPseudoConversionV_W;
+defm PseudoVFNCVT_RTZ_X_F : VPseudoConversionV_W;
+defm PseudoVFNCVT_F_XU : VPseudoConversionV_W;
+defm PseudoVFNCVT_F_X : VPseudoConversionV_W;
+defm PseudoVFNCVT_F_F : VPseudoConversionV_W;
+defm PseudoVFNCVT_ROD_F_F : VPseudoConversionV_W;
 } // Predicates = [HasStdExtV, HasStdExtF]
 
 let Predicates = [HasStdExtV] in {
@@ -3200,6 +3254,18 @@ defm "" : VPatConversionWI_VF<"int_riscv_vfwcvt_rtz_x_f_v", "PseudoVFWCVT_RTZ_X_F">;
 defm "" : VPatConversionWF_VI<"int_riscv_vfwcvt_f_xu_v", "PseudoVFWCVT_F_XU">;
 defm "" : VPatConversionWF_VI<"int_riscv_vfwcvt_f_x_v", "PseudoVFWCVT_F_X">;
 defm "" : VPatConversionWF_VF<"int_riscv_vfwcvt_f_f_v", "PseudoVFWCVT_F_F">;
+
+//===----------------------------------------------------------------------===//
+// 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
+//===----------------------------------------------------------------------===//
+defm "" : VPatConversionVI_WF<"int_riscv_vfncvt_xu_f_w", "PseudoVFNCVT_XU_F">;
+defm "" : VPatConversionVI_WF<"int_riscv_vfncvt_x_f_w", "PseudoVFNCVT_X_F">;
+defm "" : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_xu_f_w", "PseudoVFNCVT_RTZ_XU_F">;
+defm "" : VPatConversionVI_WF<"int_riscv_vfncvt_rtz_x_f_w", "PseudoVFNCVT_RTZ_X_F">;
+defm "" : VPatConversionVF_WI <"int_riscv_vfncvt_f_xu_w", "PseudoVFNCVT_F_XU">;
+defm "" : VPatConversionVF_WI <"int_riscv_vfncvt_f_x_w", "PseudoVFNCVT_F_X">;
+defm "" : VPatConversionVF_WF<"int_riscv_vfncvt_f_f_w", "PseudoVFNCVT_F_F">;
+defm "" : VPatConversionVF_WF<"int_riscv_vfncvt_rod_f_f_w", "PseudoVFNCVT_ROD_F_F">;
 } // Predicates = [HasStdExtV, HasStdExtF]
 
 let Predicates = [HasStdExtV] in {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll
new file mode 100644
index 00000000000000..5da6960fa8d89a
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv32.ll
@@ -0,0 +1,325 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
+  <vscale x 1 x float>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x float> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
+    <vscale x 1 x float> %0,
+    i32 %1)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
+  <vscale x 1 x half>,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
+; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %2,
+    i32 %3)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32(
+  <vscale x 2 x float>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32(<vscale x 2 x float> %0, i32 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32(
+    <vscale x 2 x float> %0,
+    i32 %1)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half>
@llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32( + , + i32); + +define @intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32( + , + i32); + +define @intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32( + , + i32); + +define @intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64( + , + i32); + +define @intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64 +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64( + , + i32); + +define @intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64( + , + i32); + +define @intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64( + , + i32); + +define @intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll new file mode 100644 index 00000000000000..4cf5e0ec5fc2ed --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-f-rv64.ll @@ -0,0 +1,325 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32( + , + i64); + +define @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( + , + , + , + i64); + +define 
@intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32( + , + i64); + +define @intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f16_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f16_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f16.nxv2f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32( + , + i64); + +define @intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f16.nxv4f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32( + , + i64); + +define @intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f16.nxv8f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32( + , + i64); + +define @intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv16f16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv16f16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, 
{{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv16f16.nxv16f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64( + , + i64); + +define @intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv1f32_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f32_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv1f32.nxv1f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64( + , + i64); + +define @intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv2f32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv2f32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv2f32.nxv2f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64( + , + i64); + +define @intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv4f32_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv4f32_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv4f32.nxv4f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64( + , + i64); + +define @intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.f.w_nxv8f32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv8f32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfncvt.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.f.w.mask.nxv8f32.nxv8f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll new file mode 
100644 index 00000000000000..a0c443685532d5 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv32.ll @@ -0,0 +1,181 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32( + , + i32); + +define @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32( + , + i32); + +define @intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32( + , + i32); + +define @intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32( + , + i32); + +define @intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32( + %0, + %1, + %2, 
+ i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32( + , + i32); + +define @intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll new file mode 100644 index 00000000000000..1cca6082438346 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-x-rv64.ll @@ -0,0 +1,325 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32( + , + i64); + +define @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32( + , + i64); + +define @intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f16.nxv2i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32( + , + i64); + +define @intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f16_nxv4i32 +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e16,m1,tu,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f16.nxv4i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32( + , + i64); + +define @intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f16.nxv8i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32( + , + i64); + +define @intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv16f16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv16f16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv16f16.nxv16i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64( + , + i64); + +define @intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv1f32_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f32_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv1f32.nxv1i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64( + , + i64); + +define @intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv2f32_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv2f32_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv2f32.nxv2i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare 
@llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64( + , + i64); + +define @intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv4f32_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv4f32_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv4f32.nxv4i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64( + , + i64); + +define @intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.x.w_nxv8f32_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv8f32_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfncvt.f.x.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.x.w.mask.nxv8f32.nxv8i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll new file mode 100644 index 00000000000000..117230db9d5ec0 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv32.ll @@ -0,0 +1,181 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32( + , + i32); + +define @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32( + , + i32); + +define @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,mf2,tu,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32( + , + i32); + +define @intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32( + , + i32); + +define @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32( + , + i32); + +define @intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32( + %0, + %1, + %2, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll new file mode 100644 index 00000000000000..7a78fddb90b01f --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-f-xu-rv64.ll @@ -0,0 +1,325 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32( + , + i64); + +define @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32( + , + , + , 
+ i64); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32( + , + i64); + +define @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32( + , + i64); + +define @intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32( + , + i64); + +define @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32( + , + i64); + +define @intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,m4,tu,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64( + , + i64); + +define @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64( + , + i64); + +define @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64( + , + i64); + +define @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64( + , + i64); + +define @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfncvt.f.xu.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64( + %0, + %1, + %2, + i64 %3) + + ret %a +} diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll new file mode 100644 index 00000000000000..8534d11b6695e5 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv32.ll @@ -0,0 +1,325 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32( + , + i32); + +define @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32( + , + i32); + +define @intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32( + , + i32); + +define @intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32( + , + i32); + +define @intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32( %0, %1, 
%2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32( + , + i32); + +define @intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64( + , + i32); + +define @intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64( + , + i32); + +define @intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64( + , + i32); + +define @intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64( + , + i32); + +define @intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll new file mode 100644 index 00000000000000..3c74a723a39588 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rod-f-f-rv64.ll @@ -0,0 +1,325 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32( + , + i64); + +define @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f16_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32( + , + i64); + +define @intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f16_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f16_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32( + , + i64); + +define @intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: 
vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32( + , + i64); + +define @intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32( + , + i64); + +define @intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv16f16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv16f16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64( + , + i64); + +define @intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f32_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv1f32_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64( + , + i64); + +define @intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv2f32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call 
@llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv2f32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64( + , + i64); + +define @intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv4f32_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv4f32_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64( + , + i64); + +define @intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv8f32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rod.f.f.w_nxv8f32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfncvt.rod.f.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll new file mode 100644 index 00000000000000..59a323a707a214 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv32.ll @@ -0,0 +1,541 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16( + , + i32); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare 
@llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16( + , + i32); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16( + , + i32); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16( + , + i32); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16( + , + i32); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16( + , + i32); + +define 
@intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32( + , + i32); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32( + , + i32); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32( + , + i32); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32( + , + i32); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32( %0, i32 %1) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32( + , + i32); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64( + , + i32); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64( + , + i32); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64( + , + i32); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64 +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64( + , + i32); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll new file mode 100644 index 00000000000000..663ba4d969f9ec --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-x-f-rv64.ll @@ -0,0 +1,541 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16( + , + i64); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i8_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i8.nxv1f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16( + , + i64); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i8_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i8_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i8.nxv2f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16( + , + i64); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i8_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i8_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i8.nxv4f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16( + , + i64); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i8_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i8_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i8.nxv8f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16( + , + i64); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i8_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i8_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i8.nxv16f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16( + , + i64); + +define @intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv32i8_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv32i8_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + 
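+
+; The scalable-vector types in these tests can be read off the intrinsic
+; names (e.g. "nxv32i8.nxv32f16" narrows <vscale x 32 x half> down to
+; <vscale x 32 x i8>). As a hedged sketch, not part of the generated tests,
+; the masked test above has the following fully-typed shape; the masked
+; intrinsic takes (maskedoff, wide source, mask, vl) and returns the
+; narrowed vector:
+;
+;   declare <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16(
+;     <vscale x 32 x i8>,    ; maskedoff: provides result elements where the mask is 0
+;     <vscale x 32 x half>,  ; wide (2*SEW) floating-point source
+;     <vscale x 32 x i1>,    ; mask operand, carried in v0
+;     i64);                  ; vl, XLEN-sized (i64 on riscv64)
+;
+;   %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv32i8.nxv32f16(
+;     <vscale x 32 x i8> %0, <vscale x 32 x half> %1,
+;     <vscale x 32 x i1> %2, i64 %3)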
+declare <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32(
+  <vscale x 1 x float>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32(<vscale x 1 x float> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i16_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
+; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32(
+    <vscale x 1 x float> %0,
+    i64 %1)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32(
+  <vscale x 1 x i16>,
+  <vscale x 1 x float>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32(<vscale x 1 x i16> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i16_nxv1f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
+; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i16.nxv1f32(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x float> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32(
+  <vscale x 2 x float>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32(<vscale x 2 x float> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i16_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
+; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32(
+    <vscale x 2 x float> %0,
+    i64 %1)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32(
+  <vscale x 2 x i16>,
+  <vscale x 2 x float>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32(<vscale x 2 x i16> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i16_nxv2f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
+; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i16.nxv2f32(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x float> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32(
+  <vscale x 4 x float>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32(<vscale x 4 x float> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i16_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
+; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32(
+    <vscale x 4 x float> %0,
+    i64 %1)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32(
+  <vscale x 4 x i16>,
+  <vscale x 4 x float>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32(<vscale x 4 x i16> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i16_nxv4f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
+; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i16.nxv4f32(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x float> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32(
+  <vscale x 8 x float>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32(<vscale x 8 x float> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i16_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
+; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32(
+    <vscale x 8 x float> %0,
+    i64 %1)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32(
+  <vscale x 8 x i16>,
+  <vscale x 8 x float>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32(<vscale x 8 x i16> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i16_nxv8f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
+; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i16.nxv8f32(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x float> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32(
+  <vscale x 16 x float>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32(<vscale x 16 x float> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv16i16_nxv16f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
+; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32(
+    <vscale x 16 x float> %0,
+    i64 %1)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32(
+  <vscale x 16 x i16>,
+  <vscale x 16 x float>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i16> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32(<vscale x 16 x i16> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv16i16_nxv16f32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
+; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv16i16.nxv16f32(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x float> %1,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64(
+  <vscale x 1 x double>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64(<vscale x 1 x double> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i32_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
+; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64(
+    <vscale x 1 x double> %0,
+    i64 %1)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64(
+  <vscale x 1 x i32>,
+  <vscale x 1 x double>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64(<vscale x 1 x i32> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv1i32_nxv1f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
+; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv1i32.nxv1f64(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x double> %1,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64(
+  <vscale x 2 x double>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64(<vscale x 2 x double> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv2i32_nxv2f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
+; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64(
+    <vscale x 2 x double> %0,
+    i64 %1)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64(
+  <vscale x 2 x i32>,
+  <vscale x 2 x double>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64(<vscale x 2 x i32> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv2i32_nxv2f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
+; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv2i32.nxv2f64(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x double> %1,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64(
+  <vscale x 4 x double>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64(<vscale x 4 x double> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv4i32_nxv4f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64(
+    <vscale x 4 x double> %0,
+    i64 %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64(
+  <vscale x 4 x i32>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64(<vscale x 4 x i32> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv4i32_nxv4f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv4i32.nxv4f64(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64(
+  <vscale x 8 x double>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64(<vscale x 8 x double> %0, i64 %1) nounwind {
+entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv8i32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.x.f.w_nxv8i32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfncvt.rtz.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.x.f.w.mask.nxv8i32.nxv8f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll new file mode 100644 index 00000000000000..c72e65b5ddae9f --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv32.ll @@ -0,0 +1,541 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16( + , + i32); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16( + , + i32); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16( + , + i32); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e8,mf2,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16( + , + i32); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16( + , + i32); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16( + , + i32); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32( + , + i32); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu +; CHECK: vfncvt.rtz.xu.f.w 
{{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32( + , + i32); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32( + , + i32); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32( + , + i32); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32( + , + i32); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + 
%a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64( + , + i32); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64( + , + i32); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64( + , + i32); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64( + , + i32); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll new file mode 100644 index 00000000000000..d5bd9542b6c188 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-rtz-xu-f-rv64.ll @@ -0,0 +1,541 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16( + , + i64); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i8_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i8.nxv1f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16( + , + i64); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i8_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i8_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i8.nxv2f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16( + , + i64); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i8_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i8_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i8.nxv4f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16( + , + i64); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i8_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16( + %0, + i64 %1) + + ret %a +} + +declare 
@llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i8_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i8.nxv8f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16( + , + i64); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i8_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i8_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i8.nxv16f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16( + , + i64); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv32i8_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv32i8_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv32i8.nxv32f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32( + , + i64); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i16_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i16_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i16.nxv1f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32( + , + i64); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i16_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32( + %0, + i64 %1) + + ret %a +} + +declare 
@llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i16_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i16.nxv2f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32( + , + i64); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i16.nxv4f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32( + , + i64); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i16.nxv8f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32( + , + i64); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv16i16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv16i16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv16i16.nxv16f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64( + , + i64); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i32_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64( + %0, + i64 %1) + + ret %a +} + +declare 
@llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv1i32_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv1i32.nxv1f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64( + , + i64); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv2i32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv2i32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv2i32.nxv2f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64( + , + i64); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv4i32_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv4i32_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv4i32.nxv4f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64( + , + i64); + +define @intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv8i32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_rtz.xu.f.w_nxv8i32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfncvt.rtz.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.rtz.xu.f.w.mask.nxv8i32.nxv8f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll new file mode 100644 index 00000000000000..4a7695cc5926fd --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv32.ll @@ -0,0 +1,541 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16( + , + i32); + +define 
@intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16( + , + i32); + +define @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16( + , + i32); + +define @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16( + , + i32); + +define @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16( + , + i32); + +define @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16( 
+ %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16( + , + i32); + +define @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32( + , + i32); + +define @intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32( + , + i32); + +define @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32( + , + i32); + +define @intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32( + , + i32); + +define @intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32( + , + i32); + +define @intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64( + , + i32); + +define @intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64( + , + i32); + +define @intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64( + %0, + %1, + %2, + i32 
%3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64( + , + i32); + +define @intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64( + , + i32); + +define @intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll new file mode 100644 index 00000000000000..7340f09b36f786 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-x-f-rv64.ll @@ -0,0 +1,541 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16( + , + i64); + +define @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i8_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i8.nxv1f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16( + , + i64); + +define @intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i8_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i8_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu +; 
CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i8.nxv2f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16( + , + i64); + +define @intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i8_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i8_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i8.nxv4f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16( + , + i64); + +define @intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i8_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i8_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i8.nxv8f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16( + , + i64); + +define @intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i8_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i8_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i8.nxv16f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16( + , + i64); + +define @intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv32i8_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv32i8_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv32i8.nxv32f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32( + , + i64); + +define 
@intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i16_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i16_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i16.nxv1f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32( + , + i64); + +define @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i16_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i16.nxv2f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32( + , + i64); + +define @intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i16.nxv4f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32( + , + i64); + +define @intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i16.nxv8f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32( + , + i64); + +define @intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv16i16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call 
@llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv16i16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv16i16.nxv16f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64( + , + i64); + +define @intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv1i32_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv1i32_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv1i32.nxv1f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64( + , + i64); + +define @intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv2i32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv2i32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv2i32.nxv2f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64( + , + i64); + +define @intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv4i32_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv4i32_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv4i32.nxv4f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64( + , + i64); + +define @intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_x.f.w_nxv8i32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64( %0, %1, %2, i64 %3) nounwind { 
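+; Operand order for the masked variants in these tests: the maskedoff
+; (merge) vector of the narrow result type, the wide source vector, the
+; mask, and the vector length. The merge operand is why the expected
+; vsetvli uses the tail-undisturbed (tu) policy rather than ta.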
+entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_x.f.w_nxv8i32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfncvt.x.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.x.f.w.mask.nxv8i32.nxv8f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll new file mode 100644 index 00000000000000..f69ac086c101a1 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv32.ll @@ -0,0 +1,541 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16( + , + i32); + +define @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16( + , + i32); + +define @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16( + , + i32); + +define @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16( + , + i32); + +define @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16( + %0, + i32 %1) + + ret %a +} + +declare 
@llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16( + , + i32); + +define @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16( + , + i32); + +define @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32( + , + i32); + +define @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32( + , + i32); + +define @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32( + , + i32); + +define @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32( + , + i32); + +define @intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32( + , + i32); + +define @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64( + , + i32); + +define @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64( + , + i32); + +define @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64( + , + i32); + +define @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64( + , + i32); + +define @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64( + %0, + i32 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64( + , + , + , + i32); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64( + %0, + %1, + %2, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll new file mode 100644 index 00000000000000..55f2c5c0cc653a --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfncvt-xu-f-rv64.ll @@ -0,0 +1,541 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16( + , + i64); + +define @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16( %0, %1, %2, i64 %3) nounwind { +entry: 
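+; As in the rv32 files above, the only difference on riscv64 is the
+; vector-length operand, which is i64 rather than i32.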
+; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16( + , + i64); + +define @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16( + , + i64); + +define @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16( + , + i64); + +define @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16( + , + i64); + +define @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16( + 
%0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16( + , + i64); + +define @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32( + , + i64); + +define @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32( + , + i64); + +define @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32( + , + i64); + +define @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32( + , + i64); + +define @intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32( %0, i64 %1) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32( + , + i64); + +define @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64( + , + i64); + +define @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64( + , + i64); + +define @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64( + %0, + i64 %1) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64( + , + , + , + i64); + +define @intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64( + %0, + %1, + %2, + i64 %3) + + ret %a +} + +declare @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64( + , + i64); + +define @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}} + %a = call 
<vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64(
+    <vscale x 4 x double> %0,
+    i64 %1)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64(
+  <vscale x 4 x i32>,
+  <vscale x 4 x double>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64(<vscale x 4 x i32> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
+; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x double> %1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64(
+  <vscale x 8 x double>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x double> %0, i64 %1) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}
+  %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64(
+    <vscale x 8 x double> %0,
+    i64 %1)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64(
+  <vscale x 8 x i32>,
+  <vscale x 8 x double>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x i32> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
+; CHECK: vfncvt.xu.f.w {{v[0-9]+}}, {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x double> %1,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i32> %a
+}
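
For reference, the assembly shape these CHECK lines match for the masked e32,m4 case is sketched below; the register choices (a0, a1, v8, v16) are illustrative only, since the tests deliberately accept any register via the {{a[0-9]+}} and {{v[0-9]+}} patterns:

  vsetvli a1, a0, e32,m4,tu,mu
  vfncvt.xu.f.w v8, v16, v0.t

Because vfncvt is a narrowing operation (SEW-wide result from a 2*SEW-wide source), the V extension's register-group overlap rules generally force the allocator to keep the destination group apart from the wide source group, as in the sketch.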