44 changes: 22 additions & 22 deletions llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16(
define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16(
<vscale x 1 x half> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16(
define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f16_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16(
<vscale x 2 x half> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16(
define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f16_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16(
<vscale x 4 x half> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16(
define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f16_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16(
<vscale x 8 x half> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16(
define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f16_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16(
<vscale x 16 x half> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16(
define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv32f16_nxv32f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16(
<vscale x 32 x half> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32(
define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f32_nxv1f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32(
<vscale x 1 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32(
define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv2f32_nxv2f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32(
<vscale x 2 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32(
define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv4f32_nxv4f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32(
<vscale x 4 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32(
define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv8f32_nxv8f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32(
<vscale x 8 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32(
define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv16f32_nxv16f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
; CHECK: vfsgnj.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32(
<vscale x 16 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.f16(
define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.f16(
<vscale x 1 x half> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.f16(
define <vscale x 2 x half> @intrinsic_vfsgnj_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x half> @llvm.riscv.vfsgnj.mask.nxv2f16.f16(
<vscale x 2 x half> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.f16(
define <vscale x 4 x half> @intrinsic_vfsgnj_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x half> @llvm.riscv.vfsgnj.mask.nxv4f16.f16(
<vscale x 4 x half> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.f16(
define <vscale x 8 x half> @intrinsic_vfsgnj_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x half> @llvm.riscv.vfsgnj.mask.nxv8f16.f16(
<vscale x 8 x half> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.f16(
define <vscale x 16 x half> @intrinsic_vfsgnj_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x half> @llvm.riscv.vfsgnj.mask.nxv16f16.f16(
<vscale x 16 x half> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.f16(
define <vscale x 32 x half> @intrinsic_vfsgnj_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv32f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 32 x half> @llvm.riscv.vfsgnj.mask.nxv32f16.f16(
<vscale x 32 x half> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32(
define <vscale x 1 x float> @intrinsic_vfsgnj_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv1f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfsgnj.mask.nxv1f32.f32(
<vscale x 1 x float> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32(
define <vscale x 2 x float> @intrinsic_vfsgnj_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv2f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfsgnj.mask.nxv2f32.f32(
<vscale x 2 x float> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32(
define <vscale x 4 x float> @intrinsic_vfsgnj_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv4f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfsgnj.mask.nxv4f32.f32(
<vscale x 4 x float> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32(
define <vscale x 8 x float> @intrinsic_vfsgnj_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv8f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfsgnj.mask.nxv8f32.f32(
<vscale x 8 x float> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32(
define <vscale x 16 x float> @intrinsic_vfsgnj_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnj_mask_vf_nxv16f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
; CHECK: vfsgnj.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfsgnj.mask.nxv16f32.f32(
<vscale x 16 x float> %0,
60 changes: 30 additions & 30 deletions llvm/test/CodeGen/RISCV/rvv/vfsgnj-rv64.ll

Large diffs are not rendered by default.

44 changes: 22 additions & 22 deletions llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16(
define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f16_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16(
<vscale x 1 x half> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16(
define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f16_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16(
<vscale x 2 x half> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16(
define <vscale x 4 x half> @intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f16_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16(
<vscale x 4 x half> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16(
define <vscale x 8 x half> @intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f16_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16(
<vscale x 8 x half> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16(
define <vscale x 16 x half> @intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f16_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16(
<vscale x 16 x half> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16(
define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv32f16_nxv32f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16(
<vscale x 32 x half> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32(
define <vscale x 1 x float> @intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv1f32_nxv1f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32(
<vscale x 1 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32(
define <vscale x 2 x float> @intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv2f32_nxv2f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32(
<vscale x 2 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32(
define <vscale x 4 x float> @intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv4f32_nxv4f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32(
<vscale x 4 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32(
define <vscale x 8 x float> @intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv8f32_nxv8f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32(
<vscale x 8 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32(
define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vv_nxv16f32_nxv16f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
; CHECK: vfsgnjn.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32(
<vscale x 16 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.f16(
define <vscale x 1 x half> @intrinsic_vfsgnjn_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x half> @llvm.riscv.vfsgnjn.mask.nxv1f16.f16(
<vscale x 1 x half> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.f16(
define <vscale x 2 x half> @intrinsic_vfsgnjn_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x half> @llvm.riscv.vfsgnjn.mask.nxv2f16.f16(
<vscale x 2 x half> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.f16(
define <vscale x 4 x half> @intrinsic_vfsgnjn_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x half> @llvm.riscv.vfsgnjn.mask.nxv4f16.f16(
<vscale x 4 x half> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.f16(
define <vscale x 8 x half> @intrinsic_vfsgnjn_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x half> @llvm.riscv.vfsgnjn.mask.nxv8f16.f16(
<vscale x 8 x half> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.f16(
define <vscale x 16 x half> @intrinsic_vfsgnjn_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x half> @llvm.riscv.vfsgnjn.mask.nxv16f16.f16(
<vscale x 16 x half> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.f16(
define <vscale x 32 x half> @intrinsic_vfsgnjn_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv32f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 32 x half> @llvm.riscv.vfsgnjn.mask.nxv32f16.f16(
<vscale x 32 x half> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32(
define <vscale x 1 x float> @intrinsic_vfsgnjn_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv1f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfsgnjn.mask.nxv1f32.f32(
<vscale x 1 x float> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.f32(
define <vscale x 2 x float> @intrinsic_vfsgnjn_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv2f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfsgnjn.mask.nxv2f32.f32(
<vscale x 2 x float> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.f32(
define <vscale x 4 x float> @intrinsic_vfsgnjn_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv4f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfsgnjn.mask.nxv4f32.f32(
<vscale x 4 x float> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.f32(
define <vscale x 8 x float> @intrinsic_vfsgnjn_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv8f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfsgnjn.mask.nxv8f32.f32(
<vscale x 8 x float> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.f32(
define <vscale x 16 x float> @intrinsic_vfsgnjn_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjn_mask_vf_nxv16f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
; CHECK: vfsgnjn.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfsgnjn.mask.nxv16f32.f32(
<vscale x 16 x float> %0,
60 changes: 30 additions & 30 deletions llvm/test/CodeGen/RISCV/rvv/vfsgnjn-rv64.ll

Large diffs are not rendered by default.

44 changes: 22 additions & 22 deletions llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16(
define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f16_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16(
<vscale x 1 x half> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16(
define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f16_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16(
<vscale x 2 x half> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16(
define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f16_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16(
<vscale x 4 x half> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16(
define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f16_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16(
<vscale x 8 x half> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16(
define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f16_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16(
<vscale x 16 x half> %0,
@@ -228,7 +228,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16(
define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv32f16_nxv32f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16(
<vscale x 32 x half> %0,
@@ -268,7 +268,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32(
define <vscale x 1 x float> @intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv1f32_nxv1f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32(
<vscale x 1 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32(
define <vscale x 2 x float> @intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv2f32_nxv2f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32(
<vscale x 2 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32(
define <vscale x 4 x float> @intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv4f32_nxv4f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32(
<vscale x 4 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32(
define <vscale x 8 x float> @intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv8f32_nxv8f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32(
<vscale x 8 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32(
define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vv_nxv16f32_nxv16f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
; CHECK: vfsgnjx.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32(
<vscale x 16 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.f16(
define <vscale x 1 x half> @intrinsic_vfsgnjx_mask_vf_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x half> @llvm.riscv.vfsgnjx.mask.nxv1f16.f16(
<vscale x 1 x half> %0,
@@ -508,7 +508,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.f16(
define <vscale x 2 x half> @intrinsic_vfsgnjx_mask_vf_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x half> @llvm.riscv.vfsgnjx.mask.nxv2f16.f16(
<vscale x 2 x half> %0,
@@ -548,7 +548,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.f16(
define <vscale x 4 x half> @intrinsic_vfsgnjx_mask_vf_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x half> @llvm.riscv.vfsgnjx.mask.nxv4f16.f16(
<vscale x 4 x half> %0,
@@ -588,7 +588,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.f16(
define <vscale x 8 x half> @intrinsic_vfsgnjx_mask_vf_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x half> @llvm.riscv.vfsgnjx.mask.nxv8f16.f16(
<vscale x 8 x half> %0,
@@ -628,7 +628,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.f16(
define <vscale x 16 x half> @intrinsic_vfsgnjx_mask_vf_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x half> @llvm.riscv.vfsgnjx.mask.nxv16f16.f16(
<vscale x 16 x half> %0,
@@ -668,7 +668,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.f16(
define <vscale x 32 x half> @intrinsic_vfsgnjx_mask_vf_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv32f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 32 x half> @llvm.riscv.vfsgnjx.mask.nxv32f16.f16(
<vscale x 32 x half> %0,
@@ -708,7 +708,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32(
define <vscale x 1 x float> @intrinsic_vfsgnjx_mask_vf_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv1f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfsgnjx.mask.nxv1f32.f32(
<vscale x 1 x float> %0,
@@ -748,7 +748,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.f32(
define <vscale x 2 x float> @intrinsic_vfsgnjx_mask_vf_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv2f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfsgnjx.mask.nxv2f32.f32(
<vscale x 2 x float> %0,
@@ -788,7 +788,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.f32(
define <vscale x 4 x float> @intrinsic_vfsgnjx_mask_vf_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv4f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfsgnjx.mask.nxv4f32.f32(
<vscale x 4 x float> %0,
@@ -828,7 +828,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.f32(
define <vscale x 8 x float> @intrinsic_vfsgnjx_mask_vf_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv8f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfsgnjx.mask.nxv8f32.f32(
<vscale x 8 x float> %0,
@@ -868,7 +868,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.f32(
define <vscale x 16 x float> @intrinsic_vfsgnjx_mask_vf_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsgnjx_mask_vf_nxv16f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
; CHECK: vfsgnjx.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfsgnjx.mask.nxv16f32.f32(
<vscale x 16 x float> %0,
60 changes: 30 additions & 30 deletions llvm/test/CodeGen/RISCV/rvv/vfsgnjx-rv64.ll

Large diffs are not rendered by default.

22 changes: 11 additions & 11 deletions llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv32.ll
@@ -33,7 +33,7 @@ define <vscale x 1 x half> @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16(<
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.h.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -79,7 +79,7 @@ define <vscale x 2 x half> @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16(<
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.h.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -125,7 +125,7 @@ define <vscale x 4 x half> @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16(<
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.h.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -171,7 +171,7 @@ define <vscale x 8 x half> @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16(<
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.h.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v18, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -217,7 +217,7 @@ define <vscale x 16 x half> @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f1
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.h.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v20, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -265,7 +265,7 @@ define <vscale x 32 x half> @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f1
; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: fmv.h.x ft0, a1
; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu
; CHECK-NEXT: vsetvli a0, a2, e16,m8,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v8, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -311,7 +311,7 @@ define <vscale x 1 x float> @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32(
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -357,7 +357,7 @@ define <vscale x 2 x float> @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32(
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -403,7 +403,7 @@ define <vscale x 4 x float> @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32(
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v18, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -449,7 +449,7 @@ define <vscale x 8 x float> @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32(
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v20, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -497,7 +497,7 @@ define <vscale x 16 x float> @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f
; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: fmv.w.x ft0, a1
; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu
; CHECK-NEXT: vsetvli a0, a2, e32,m8,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v8, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
30 changes: 15 additions & 15 deletions llvm/test/CodeGen/RISCV/rvv/vfslide1down-rv64.ll
@@ -33,7 +33,7 @@ define <vscale x 1 x half> @intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16(<
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.h.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -79,7 +79,7 @@ define <vscale x 2 x half> @intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16(<
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.h.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -125,7 +125,7 @@ define <vscale x 4 x half> @intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16(<
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.h.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -171,7 +171,7 @@ define <vscale x 8 x half> @intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16(<
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.h.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v18, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -217,7 +217,7 @@ define <vscale x 16 x half> @intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f1
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.h.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v20, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -265,7 +265,7 @@ define <vscale x 32 x half> @intrinsic_vfslide1down_mask_vf_nxv32f16_nxv32f16_f1
; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: fmv.h.x ft0, a1
; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu
; CHECK-NEXT: vsetvli a0, a2, e16,m8,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v8, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -311,7 +311,7 @@ define <vscale x 1 x float> @intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32(
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -357,7 +357,7 @@ define <vscale x 2 x float> @intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32(
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -403,7 +403,7 @@ define <vscale x 4 x float> @intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32(
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v18, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -449,7 +449,7 @@ define <vscale x 8 x float> @intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32(
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v20, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -497,7 +497,7 @@ define <vscale x 16 x float> @intrinsic_vfslide1down_mask_vf_nxv16f32_nxv16f32_f
; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: fmv.w.x ft0, a1
; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu
; CHECK-NEXT: vsetvli a0, a2, e32,m8,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v8, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -543,7 +543,7 @@ define <vscale x 1 x double> @intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.d.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e64,m1,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -589,7 +589,7 @@ define <vscale x 2 x double> @intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.d.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e64,m2,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v18, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -635,7 +635,7 @@ define <vscale x 4 x double> @intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64
; CHECK-LABEL: intrinsic_vfslide1down_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.d.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v20, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -683,7 +683,7 @@ define <vscale x 8 x double> @intrinsic_vfslide1down_mask_vf_nxv8f64_nxv8f64_f64
; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: fmv.d.x ft0, a1
; CHECK-NEXT: vsetvli a0, a2, e64,m8,ta,mu
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: vfslide1down.vf v16, v8, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
22 changes: 11 additions & 11 deletions llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv32.ll
@@ -34,7 +34,7 @@ define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vs
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.h.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -81,7 +81,7 @@ define <vscale x 2 x half> @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16(<vs
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.h.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -128,7 +128,7 @@ define <vscale x 4 x half> @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16(<vs
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.h.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -175,7 +175,7 @@ define <vscale x 8 x half> @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16(<vs
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.h.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v18, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -222,7 +222,7 @@ define <vscale x 16 x half> @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16(
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.h.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v20, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -271,7 +271,7 @@ define <vscale x 32 x half> @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16(
; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: fmv.h.x ft0, a1
; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu
; CHECK-NEXT: vsetvli a0, a2, e16,m8,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v8, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -318,7 +318,7 @@ define <vscale x 1 x float> @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32(<v
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -365,7 +365,7 @@ define <vscale x 2 x float> @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32(<v
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -412,7 +412,7 @@ define <vscale x 4 x float> @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32(<v
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v18, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -459,7 +459,7 @@ define <vscale x 8 x float> @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32(<v
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v20, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -508,7 +508,7 @@ define <vscale x 16 x float> @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32
; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: fmv.w.x ft0, a1
; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu
; CHECK-NEXT: vsetvli a0, a2, e32,m8,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v8, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
30 changes: 15 additions & 15 deletions llvm/test/CodeGen/RISCV/rvv/vfslide1up-rv64.ll
@@ -34,7 +34,7 @@ define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vs
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.h.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e16,mf4,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -81,7 +81,7 @@ define <vscale x 2 x half> @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16(<vs
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.h.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e16,mf2,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -128,7 +128,7 @@ define <vscale x 4 x half> @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16(<vs
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.h.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e16,m1,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -175,7 +175,7 @@ define <vscale x 8 x half> @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16(<vs
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.h.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e16,m2,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v18, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -222,7 +222,7 @@ define <vscale x 16 x half> @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16(
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.h.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e16,m4,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v20, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -271,7 +271,7 @@ define <vscale x 32 x half> @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16(
; CHECK-NEXT: vsetvli a3, zero, e16,m8,ta,mu
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: fmv.h.x ft0, a1
; CHECK-NEXT: vsetvli a0, a2, e16,m8,ta,mu
; CHECK-NEXT: vsetvli a0, a2, e16,m8,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v8, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -318,7 +318,7 @@ define <vscale x 1 x float> @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32(<v
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e32,mf2,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -365,7 +365,7 @@ define <vscale x 2 x float> @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32(<v
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e32,m1,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -412,7 +412,7 @@ define <vscale x 4 x float> @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32(<v
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e32,m2,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v18, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -459,7 +459,7 @@ define <vscale x 8 x float> @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32(<v
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.w.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e32,m4,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v20, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -508,7 +508,7 @@ define <vscale x 16 x float> @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32
; CHECK-NEXT: vsetvli a3, zero, e32,m8,ta,mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: fmv.w.x ft0, a1
; CHECK-NEXT: vsetvli a0, a2, e32,m8,ta,mu
; CHECK-NEXT: vsetvli a0, a2, e32,m8,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v8, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -555,7 +555,7 @@ define <vscale x 1 x double> @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64(<
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.d.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e64,m1,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v17, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -602,7 +602,7 @@ define <vscale x 2 x double> @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64(<
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.d.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e64,m2,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v18, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -649,7 +649,7 @@ define <vscale x 4 x double> @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64(<
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: fmv.d.x ft0, a0
; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
; CHECK-NEXT: vsetvli a0, a1, e64,m4,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v20, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
@@ -698,7 +698,7 @@ define <vscale x 8 x double> @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64(<
; CHECK-NEXT: vsetvli a3, zero, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: fmv.d.x ft0, a1
; CHECK-NEXT: vsetvli a0, a2, e64,m8,ta,mu
; CHECK-NEXT: vsetvli a0, a2, e64,m8,tu,mu
; CHECK-NEXT: vfslide1up.vf v16, v8, ft0, v0.t
; CHECK-NEXT: jalr zero, 0(ra)
entry:
44 changes: 22 additions & 22 deletions llvm/test/CodeGen/RISCV/rvv/vfsub-rv32.ll
@@ -29,7 +29,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
define <vscale x 1 x half> @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
<vscale x 1 x half> %0,
@@ -69,7 +69,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
define <vscale x 2 x half> @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
<vscale x 2 x half> %0,
@@ -109,7 +109,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
define <vscale x 4 x half> @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
<vscale x 4 x half> %0,
@@ -149,7 +149,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16(
define <vscale x 8 x half> @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16(
<vscale x 8 x half> %0,
@@ -189,7 +189,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16(
define <vscale x 16 x half> @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16(
<vscale x 16 x half> %0,
@@ -229,7 +229,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16(
define <vscale x 32 x half> @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16(
<vscale x 32 x half> %0,
@@ -269,7 +269,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32(
define <vscale x 1 x float> @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32(
<vscale x 1 x float> %0,
@@ -309,7 +309,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(
define <vscale x 2 x float> @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(
<vscale x 2 x float> %0,
@@ -349,7 +349,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32(
define <vscale x 4 x float> @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32(
<vscale x 4 x float> %0,
@@ -389,7 +389,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32(
define <vscale x 8 x float> @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32(
<vscale x 8 x float> %0,
@@ -429,7 +429,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32(
define <vscale x 16 x float> @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32(
<vscale x 16 x float> %0,
@@ -469,7 +469,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
define <vscale x 1 x half> @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
<vscale x 1 x half> %0,
@@ -509,7 +509,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16(
define <vscale x 2 x half> @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16(
<vscale x 2 x half> %0,
@@ -549,7 +549,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16(
define <vscale x 4 x half> @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16(
<vscale x 4 x half> %0,
@@ -589,7 +589,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16(
define <vscale x 8 x half> @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16(
<vscale x 8 x half> %0,
@@ -629,7 +629,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16(
define <vscale x 16 x half> @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16(
<vscale x 16 x half> %0,
@@ -669,7 +669,7 @@ declare <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16(
define <vscale x 32 x half> @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,tu,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16(
<vscale x 32 x half> %0,
@@ -709,7 +709,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32(
define <vscale x 1 x float> @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32(
<vscale x 1 x float> %0,
@@ -749,7 +749,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32(
define <vscale x 2 x float> @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32(
<vscale x 2 x float> %0,
@@ -789,7 +789,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32(
define <vscale x 4 x float> @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32(
<vscale x 4 x float> %0,
@@ -829,7 +829,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32(
define <vscale x 8 x float> @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32(
<vscale x 8 x float> %0,
@@ -869,7 +869,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32(
define <vscale x 16 x float> @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,tu,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32(
<vscale x 16 x float> %0,
60 changes: 30 additions & 30 deletions llvm/test/CodeGen/RISCV/rvv/vfsub-rv64.ll

Large diffs are not rendered by default.

20 changes: 10 additions & 10 deletions llvm/test/CodeGen/RISCV/rvv/vfwadd-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16(
define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16(
<vscale x 1 x float> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16(
define <vscale x 2 x float> @intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16(
<vscale x 2 x float> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16(
define <vscale x 4 x float> @intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16(
<vscale x 4 x float> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16(
define <vscale x 8 x float> @intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16(
<vscale x 8 x float> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16(
define <vscale x 16 x float> @intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16(
<vscale x 16 x float> %0,
@@ -228,7 +228,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16.f16(
define <vscale x 1 x float> @intrinsic_vfwadd_mask_vf_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16.f16(
<vscale x 1 x float> %0,
@@ -268,7 +268,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16.f16(
define <vscale x 2 x float> @intrinsic_vfwadd_mask_vf_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16.f16(
<vscale x 2 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16.f16(
define <vscale x 4 x float> @intrinsic_vfwadd_mask_vf_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16.f16(
<vscale x 4 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16.f16(
define <vscale x 8 x float> @intrinsic_vfwadd_mask_vf_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16.f16(
<vscale x 8 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16.f16(
define <vscale x 16 x float> @intrinsic_vfwadd_mask_vf_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16.f16(
<vscale x 16 x float> %0,
36 changes: 18 additions & 18 deletions llvm/test/CodeGen/RISCV/rvv/vfwadd-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16(
define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f16_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16(
<vscale x 1 x float> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16(
define <vscale x 2 x float> @intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f16_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16(
<vscale x 2 x float> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16(
define <vscale x 4 x float> @intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f16_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16(
<vscale x 4 x float> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16(
define <vscale x 8 x float> @intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f16_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16(
<vscale x 8 x float> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16(
define <vscale x 16 x float> @intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv16f16_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16(
<vscale x 16 x float> %0,
@@ -228,7 +228,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f32(
define <vscale x 1 x double> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f32(
<vscale x 1 x double> %0,
@@ -268,7 +268,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f32(
define <vscale x 2 x double> @intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv2f32_nxv2f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f32(
<vscale x 2 x double> %0,
@@ -308,7 +308,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f32(
define <vscale x 4 x double> @intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv4f32_nxv4f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f32(
<vscale x 4 x double> %0,
@@ -348,7 +348,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f32(
define <vscale x 8 x double> @intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv8f32_nxv8f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
; CHECK: vfwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f32(
<vscale x 8 x double> %0,
@@ -388,7 +388,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16.f16(
define <vscale x 1 x float> @intrinsic_vfwadd_mask_vf_nxv1f16_f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f16.f16(
<vscale x 1 x float> %0,
@@ -428,7 +428,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16.f16(
define <vscale x 2 x float> @intrinsic_vfwadd_mask_vf_nxv2f16_f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.mask.nxv2f16.f16(
<vscale x 2 x float> %0,
@@ -468,7 +468,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16.f16(
define <vscale x 4 x float> @intrinsic_vfwadd_mask_vf_nxv4f16_f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.mask.nxv4f16.f16(
<vscale x 4 x float> %0,
@@ -508,7 +508,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16.f16(
define <vscale x 8 x float> @intrinsic_vfwadd_mask_vf_nxv8f16_f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.mask.nxv8f16.f16(
<vscale x 8 x float> %0,
@@ -548,7 +548,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16.f16(
define <vscale x 16 x float> @intrinsic_vfwadd_mask_vf_nxv16f16_f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.mask.nxv16f16.f16(
<vscale x 16 x float> %0,
@@ -588,7 +588,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f32.f32(
define <vscale x 1 x double> @intrinsic_vfwadd_mask_vf_nxv1f32_f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv1f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x double> @llvm.riscv.vfwadd.mask.nxv1f32.f32(
<vscale x 1 x double> %0,
@@ -628,7 +628,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f32.f32(
define <vscale x 2 x double> @intrinsic_vfwadd_mask_vf_nxv2f32_f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv2f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x double> @llvm.riscv.vfwadd.mask.nxv2f32.f32(
<vscale x 2 x double> %0,
@@ -668,7 +668,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f32.f32(
define <vscale x 4 x double> @intrinsic_vfwadd_mask_vf_nxv4f32_f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv4f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x double> @llvm.riscv.vfwadd.mask.nxv4f32.f32(
<vscale x 4 x double> %0,
@@ -708,7 +708,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f32.f32(
define <vscale x 8 x double> @intrinsic_vfwadd_mask_vf_nxv8f32_f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd_mask_vf_nxv8f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu
; CHECK: vfwadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x double> @llvm.riscv.vfwadd.mask.nxv8f32.f32(
<vscale x 8 x double> %0,
20 changes: 10 additions & 10 deletions llvm/test/CodeGen/RISCV/rvv/vfwadd.w-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f16(
define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv1f32_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f16(
<vscale x 1 x float> %0,
@@ -68,7 +68,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f16(
define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv2f32_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f16(
<vscale x 2 x float> %0,
@@ -108,7 +108,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f16(
define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv4f32_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f16(
<vscale x 4 x float> %0,
@@ -148,7 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f16(
define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv8f32_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f16(
<vscale x 8 x float> %0,
@@ -188,7 +188,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f16(
define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wv_nxv16f32_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
; CHECK: vfwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f16(
<vscale x 16 x float> %0,
@@ -228,7 +228,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
define <vscale x 1 x float> @intrinsic_vfwadd.w_mask_wf_nxv1f32_f16(<vscale x 1 x float> %0, <vscale x 1 x float> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv1f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfwadd.w.mask.nxv1f32.f16(
<vscale x 1 x float> %0,
@@ -268,7 +268,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
define <vscale x 2 x float> @intrinsic_vfwadd.w_mask_wf_nxv2f32_f16(<vscale x 2 x float> %0, <vscale x 2 x float> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv2f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfwadd.w.mask.nxv2f32.f16(
<vscale x 2 x float> %0,
@@ -308,7 +308,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
define <vscale x 4 x float> @intrinsic_vfwadd.w_mask_wf_nxv4f32_f16(<vscale x 4 x float> %0, <vscale x 4 x float> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv4f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfwadd.w.mask.nxv4f32.f16(
<vscale x 4 x float> %0,
@@ -348,7 +348,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
define <vscale x 8 x float> @intrinsic_vfwadd.w_mask_wf_nxv8f32_f16(<vscale x 8 x float> %0, <vscale x 8 x float> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv8f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfwadd.w.mask.nxv8f32.f16(
<vscale x 8 x float> %0,
@@ -388,7 +388,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
define <vscale x 16 x float> @intrinsic_vfwadd.w_mask_wf_nxv16f32_f16(<vscale x 16 x float> %0, <vscale x 16 x float> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfwadd.w_mask_wf_nxv16f32_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu
; CHECK: vfwadd.wf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfwadd.w.mask.nxv16f32.f16(
<vscale x 16 x float> %0,