72 changes: 30 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vmfge.ll
@@ -34,10 +34,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16(
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmfle.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vmfle.vv v0, v9, v8
; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -86,10 +85,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16(
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmfle.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vmfle.vv v0, v9, v8
; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -138,10 +136,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16(
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmfle.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vmfle.vv v0, v9, v8
; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -190,12 +187,11 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16(
define <vscale x 8 x i1> @intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmfle.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmfle.vv v0, v10, v8
+; CHECK-NEXT: vmfle.vv v14, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmfle.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16(
@@ -242,12 +238,11 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16(
define <vscale x 16 x i1> @intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmfle.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmfle.vv v0, v12, v8
+; CHECK-NEXT: vmfle.vv v20, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmfle.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16(
@@ -294,10 +289,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32(
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmfle.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vmfle.vv v0, v9, v8
; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -346,10 +340,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32(
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmfle.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vmfle.vv v0, v9, v8
; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -398,12 +391,11 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32(
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmfle.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmfle.vv v0, v10, v8
+; CHECK-NEXT: vmfle.vv v14, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmfle.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32(
@@ -450,12 +442,11 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32(
define <vscale x 8 x i1> @intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmfle.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmfle.vv v0, v12, v8
+; CHECK-NEXT: vmfle.vv v20, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmfle.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32(
@@ -502,10 +493,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64(
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmfle.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vmfle.vv v0, v9, v8
; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -554,12 +544,11 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64(
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmfle.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmfle.vv v0, v10, v8
+; CHECK-NEXT: vmfle.vv v14, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmfle.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64(
@@ -606,12 +595,11 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64(
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmfle.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmfle.vv v0, v12, v8
+; CHECK-NEXT: vmfle.vv v20, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmfle.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64(
72 changes: 30 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vmfgt.ll
@@ -34,10 +34,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16(
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmflt.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vmflt.vv v0, v9, v8
; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -86,10 +85,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16(
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmflt.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vmflt.vv v0, v9, v8
; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -138,10 +136,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16(
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmflt.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vmflt.vv v0, v9, v8
; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -190,12 +187,11 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16(
define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmflt.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmflt.vv v0, v10, v8
+; CHECK-NEXT: vmflt.vv v14, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmflt.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16(
@@ -242,12 +238,11 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16(
define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmflt.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmflt.vv v0, v12, v8
+; CHECK-NEXT: vmflt.vv v20, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmflt.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16(
@@ -294,10 +289,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32(
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmflt.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vmflt.vv v0, v9, v8
; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -346,10 +340,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32(
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmflt.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vmflt.vv v0, v9, v8
; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -398,12 +391,11 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32(
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmflt.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmflt.vv v0, v10, v8
+; CHECK-NEXT: vmflt.vv v14, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmflt.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32(
@@ -450,12 +442,11 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32(
define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmflt.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmflt.vv v0, v12, v8
+; CHECK-NEXT: vmflt.vv v20, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmflt.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32(
@@ -502,10 +493,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64(
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmflt.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vmflt.vv v0, v9, v8
; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -554,12 +544,11 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64(
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmflt.vv v14, v10, v8
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmflt.vv v0, v10, v8
+; CHECK-NEXT: vmflt.vv v14, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmflt.vv v8, v12, v10, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64(
@@ -606,12 +595,11 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64(
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmflt.vv v20, v12, v8
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmflt.vv v0, v12, v8
+; CHECK-NEXT: vmflt.vv v20, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmflt.vv v8, v16, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64(
72 changes: 30 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vmfle.ll
@@ -34,10 +34,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16(
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmfle.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vmfle.vv v0, v8, v9
; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -86,10 +85,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16(
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmfle.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vmfle.vv v0, v8, v9
; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -138,10 +136,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16(
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmfle.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vmfle.vv v0, v8, v9
; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -190,12 +187,11 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16(
define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmfle.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmfle.vv v0, v8, v10
+; CHECK-NEXT: vmfle.vv v14, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmfle.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
@@ -242,12 +238,11 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16(
define <vscale x 16 x i1> @intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmfle.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmfle.vv v0, v8, v12
+; CHECK-NEXT: vmfle.vv v20, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmfle.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
@@ -294,10 +289,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32(
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmfle.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vmfle.vv v0, v8, v9
; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -346,10 +340,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32(
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmfle.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vmfle.vv v0, v8, v9
; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -398,12 +391,11 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32(
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmfle.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmfle.vv v0, v8, v10
+; CHECK-NEXT: vmfle.vv v14, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmfle.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
@@ -450,12 +442,11 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32(
define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmfle.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmfle.vv v0, v8, v12
+; CHECK-NEXT: vmfle.vv v20, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmfle.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
@@ -502,10 +493,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64(
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmfle.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vmfle.vv v0, v8, v9
; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -554,12 +544,11 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64(
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmfle.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmfle.vv v0, v8, v10
+; CHECK-NEXT: vmfle.vv v14, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmfle.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
@@ -606,12 +595,11 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64(
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmfle.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmfle.vv v0, v8, v12
+; CHECK-NEXT: vmfle.vv v20, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmfle.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
72 changes: 30 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vmflt.ll
@@ -34,10 +34,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16(
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmflt.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vmflt.vv v0, v8, v9
; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -86,10 +85,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16(
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmflt.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vmflt.vv v0, v8, v9
; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -138,10 +136,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16(
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmflt.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vmflt.vv v0, v8, v9
; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -190,12 +187,11 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16(
define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmflt.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmflt.vv v0, v8, v10
+; CHECK-NEXT: vmflt.vv v14, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmflt.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16(
@@ -242,12 +238,11 @@ declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16(
define <vscale x 16 x i1> @intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmflt.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmflt.vv v0, v8, v12
+; CHECK-NEXT: vmflt.vv v20, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmflt.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16(
@@ -294,10 +289,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32(
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmflt.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vmflt.vv v0, v8, v9
; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -346,10 +340,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32(
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmflt.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vmflt.vv v0, v8, v9
; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -398,12 +391,11 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32(
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmflt.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmflt.vv v0, v8, v10
+; CHECK-NEXT: vmflt.vv v14, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmflt.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32(
@@ -450,12 +442,11 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32(
define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmflt.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmflt.vv v0, v8, v12
+; CHECK-NEXT: vmflt.vv v20, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmflt.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32(
@@ -502,10 +493,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64(
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmflt.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vmflt.vv v0, v8, v9
; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -554,12 +544,11 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64(
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmflt.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmflt.vv v0, v8, v10
+; CHECK-NEXT: vmflt.vv v14, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmflt.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
@@ -606,12 +595,11 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64(
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmflt.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmflt.vv v0, v8, v12
+; CHECK-NEXT: vmflt.vv v20, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmflt.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
72 changes: 30 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/vmfne.ll
@@ -34,10 +34,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16(
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmfne.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vmfne.vv v0, v8, v9
; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -86,10 +85,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16(
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmfne.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vmfne.vv v0, v8, v9
; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -138,10 +136,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16(
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vmfne.vv v0, v8, v9
; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -190,12 +187,11 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16(
define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmfne.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmfne.vv v0, v8, v10
+; CHECK-NEXT: vmfne.vv v14, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmfne.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
@@ -242,12 +238,11 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16(
define <vscale x 16 x i1> @intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmfne.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmfne.vv v0, v8, v12
+; CHECK-NEXT: vmfne.vv v20, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmfne.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
@@ -294,10 +289,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32(
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmfne.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vmfne.vv v0, v8, v9
; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -346,10 +340,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32(
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vmfne.vv v0, v8, v9
; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -398,12 +391,11 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32(
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmfne.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmfne.vv v0, v8, v10
+; CHECK-NEXT: vmfne.vv v14, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmfne.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
@@ -450,12 +442,11 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32(
define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmfne.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmfne.vv v0, v8, v12
+; CHECK-NEXT: vmfne.vv v20, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmfne.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
@@ -502,10 +493,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64(
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vmfne.vv v0, v8, v9
; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -554,12 +544,11 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64(
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmfne.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmfne.vv v0, v8, v10
+; CHECK-NEXT: vmfne.vv v14, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmfne.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
@@ -606,12 +595,11 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64(
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmfne.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmfne.vv v0, v8, v12
+; CHECK-NEXT: vmfne.vv v20, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmfne.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
106 changes: 44 additions & 62 deletions llvm/test/CodeGen/RISCV/rvv/vmseq.ll
@@ -34,10 +34,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmseq.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vmseq.vv v0, v8, v9
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -86,10 +85,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmseq.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vmseq.vv v0, v8, v9
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -138,10 +136,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmseq.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vmseq.vv v0, v8, v9
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -190,10 +187,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmseq.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vmseq.vv v0, v8, v9
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -242,12 +238,11 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmseq.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmseq.vv v0, v8, v10
+; CHECK-NEXT: vmseq.vv v14, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
@@ -294,12 +289,11 @@ declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmseq.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmseq.vv v0, v8, v12
+; CHECK-NEXT: vmseq.vv v20, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
@@ -346,10 +340,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmseq.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vmseq.vv v0, v8, v9
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -398,10 +391,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmseq.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vmseq.vv v0, v8, v9
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -450,10 +442,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmseq.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vmseq.vv v0, v8, v9
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -502,12 +493,11 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmseq.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmseq.vv v0, v8, v10
+; CHECK-NEXT: vmseq.vv v14, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
@@ -554,12 +544,11 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmseq.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmseq.vv v0, v8, v12
+; CHECK-NEXT: vmseq.vv v20, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
@@ -606,10 +595,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmseq.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vmseq.vv v0, v8, v9
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -658,10 +646,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmseq.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vmseq.vv v0, v8, v9
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -710,12 +697,11 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmseq.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmseq.vv v0, v8, v10
+; CHECK-NEXT: vmseq.vv v14, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
@@ -762,12 +748,11 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmseq.vv v20, v8, v12
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmseq.vv v0, v8, v12
+; CHECK-NEXT: vmseq.vv v20, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
-; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
@@ -814,10 +799,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmseq.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vmv.v.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vmseq.vv v0, v8, v9
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -866,12 +850,11 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmseq.vv v14, v8, v10
-; CHECK-NEXT: vmv1r.v v8, v0
+; CHECK-NEXT: vmseq.vv v0, v8, v10
+; CHECK-NEXT: vmseq.vv v14, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
-; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t
-; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
@@ -918,12 +901,11 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmseq.vv v20, v8, v12
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmseq.vv v0, v8, v12
; CHECK-NEXT: vmseq.vv v20, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
106 changes: 44 additions & 62 deletions llvm/test/CodeGen/RISCV/rvv/vmsge.ll
@@ -34,10 +34,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsle.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsle.vv v0, v9, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -86,10 +85,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsle.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsle.vv v0, v9, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -138,10 +136,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsle.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsle.vv v0, v9, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -190,10 +187,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsle.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsle.vv v0, v9, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -242,12 +238,11 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsle.vv v14, v10, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmsle.vv v0, v10, v8
; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
@@ -294,12 +289,11 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsle.vv v20, v12, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmsle.vv v0, v12, v8
; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
@@ -346,10 +340,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsle.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsle.vv v0, v9, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -398,10 +391,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmsle.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmsle.vv v0, v9, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -450,10 +442,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsle.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsle.vv v0, v9, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -502,12 +493,11 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsle.vv v14, v10, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmsle.vv v0, v10, v8
; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
@@ -554,12 +544,11 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsle.vv v20, v12, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmsle.vv v0, v12, v8
; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
@@ -606,10 +595,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsle.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsle.vv v0, v9, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -658,10 +646,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsle.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsle.vv v0, v9, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -710,12 +697,11 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsle.vv v14, v10, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmsle.vv v0, v10, v8
; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
@@ -762,12 +748,11 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsle.vv v20, v12, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmsle.vv v0, v12, v8
; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
@@ -814,10 +799,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsle.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsle.vv v0, v9, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -866,12 +850,11 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsle.vv v14, v10, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmsle.vv v0, v10, v8
; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
@@ -918,12 +901,11 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsle.vv v20, v12, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmsle.vv v0, v12, v8
; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
106 changes: 44 additions & 62 deletions llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
@@ -34,10 +34,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsleu.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v9, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -86,10 +85,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsleu.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v9, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -138,10 +136,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsleu.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v9, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -190,10 +187,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsleu.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v9, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -242,12 +238,11 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsleu.vv v14, v10, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmsleu.vv v0, v10, v8
; CHECK-NEXT: vmsleu.vv v14, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
@@ -294,12 +289,11 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsleu.vv v20, v12, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmsleu.vv v0, v12, v8
; CHECK-NEXT: vmsleu.vv v20, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
@@ -346,10 +340,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsleu.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v9, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -398,10 +391,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmsleu.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v9, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -450,10 +442,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsleu.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v9, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -502,12 +493,11 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsleu.vv v14, v10, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmsleu.vv v0, v10, v8
; CHECK-NEXT: vmsleu.vv v14, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
@@ -554,12 +544,11 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsleu.vv v20, v12, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmsleu.vv v0, v12, v8
; CHECK-NEXT: vmsleu.vv v20, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
@@ -606,10 +595,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsleu.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v9, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -658,10 +646,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsleu.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v9, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -710,12 +697,11 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsleu.vv v14, v10, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmsleu.vv v0, v10, v8
; CHECK-NEXT: vmsleu.vv v14, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
@@ -762,12 +748,11 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsleu.vv v20, v12, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmsleu.vv v0, v12, v8
; CHECK-NEXT: vmsleu.vv v20, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
@@ -814,10 +799,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsleu.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v9, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -866,12 +850,11 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsleu.vv v14, v10, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmsleu.vv v0, v10, v8
; CHECK-NEXT: vmsleu.vv v14, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
@@ -918,12 +901,11 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsleu.vv v20, v12, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmsleu.vv v0, v12, v8
; CHECK-NEXT: vmsleu.vv v20, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
106 changes: 44 additions & 62 deletions llvm/test/CodeGen/RISCV/rvv/vmsgt.ll
@@ -34,10 +34,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmslt.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmslt.vv v0, v9, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -86,10 +85,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmslt.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmslt.vv v0, v9, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -138,10 +136,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmslt.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmslt.vv v0, v9, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -190,10 +187,9 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmslt.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmslt.vv v0, v9, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -242,12 +238,11 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmslt.vv v14, v10, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmslt.vv v0, v10, v8
; CHECK-NEXT: vmslt.vv v14, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
@@ -294,12 +289,11 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmslt.vv v20, v12, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmslt.vv v0, v12, v8
; CHECK-NEXT: vmslt.vv v20, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
@@ -346,10 +340,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmslt.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmslt.vv v0, v9, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -398,10 +391,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmslt.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmslt.vv v0, v9, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -450,10 +442,9 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmslt.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmslt.vv v0, v9, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -502,12 +493,11 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmslt.vv v14, v10, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmslt.vv v0, v10, v8
; CHECK-NEXT: vmslt.vv v14, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
@@ -554,12 +544,11 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmslt.vv v20, v12, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmslt.vv v0, v12, v8
; CHECK-NEXT: vmslt.vv v20, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
@@ -606,10 +595,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmslt.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmslt.vv v0, v9, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -658,10 +646,9 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmslt.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmslt.vv v0, v9, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -710,12 +697,11 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmslt.vv v14, v10, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmslt.vv v0, v10, v8
; CHECK-NEXT: vmslt.vv v14, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
@@ -762,12 +748,11 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmslt.vv v20, v12, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmslt.vv v0, v12, v8
; CHECK-NEXT: vmslt.vv v20, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
@@ -814,10 +799,9 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmslt.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmslt.vv v0, v9, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -866,12 +850,11 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmslt.vv v14, v10, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmslt.vv v0, v10, v8
; CHECK-NEXT: vmslt.vv v14, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
@@ -918,12 +901,11 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmslt.vv v20, v12, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmslt.vv v0, v12, v8
; CHECK-NEXT: vmslt.vv v20, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
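All of the masked-compare tests in these files share one body shape, which the collapsed hunks above only show in part. The sketch below reconstructs that shape for one case; it is an illustration under assumptions, not a copy of the hidden test body: the function name is hypothetical, the masked intrinsic's operand order (merge value, two sources, mask, vl) is the usual one for these intrinsics, and iXLen is the placeholder these tests substitute with i32 or i64 per target.

; Sketch: an unmasked compare builds the mask, then the masked compare
; consumes it through v0 while merging inactive lanes from the %0 operand.
; This is the IR shape behind the two vmslt.vv instructions checked above.
declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
  <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32(
  <vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>,
  <vscale x 4 x i1>, iXLen)

define <vscale x 4 x i1> @sketch_vmsgt_mask_vv_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
    <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, iXLen %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32(
    <vscale x 4 x i1> %0, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3,
    <vscale x 4 x i1> %mask, iXLen %4)
  ret <vscale x 4 x i1> %a
}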