106 changes: 62 additions & 44 deletions llvm/test/CodeGen/RISCV/rvv/vmseq.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v10
-; CHECK-NEXT: vmseq.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmseq.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v12
-; CHECK-NEXT: vmseq.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmseq.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v10
-; CHECK-NEXT: vmseq.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmseq.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v12
-; CHECK-NEXT: vmseq.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmseq.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v10
-; CHECK-NEXT: vmseq.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmseq.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v12
-; CHECK-NEXT: vmseq.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmseq.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v10
-; CHECK-NEXT: vmseq.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmseq.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v12
-; CHECK-NEXT: vmseq.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmseq.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
106 changes: 62 additions & 44 deletions llvm/test/CodeGen/RISCV/rvv/vmsge.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsle.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsle.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsle.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsle.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsle.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsle.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsle.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsle.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
106 changes: 62 additions & 44 deletions llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v10, v8
-; CHECK-NEXT: vmsleu.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsleu.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v12, v8
-; CHECK-NEXT: vmsleu.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v10, v8
-; CHECK-NEXT: vmsleu.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsleu.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v12, v8
-; CHECK-NEXT: vmsleu.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v10, v8
-; CHECK-NEXT: vmsleu.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsleu.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v12, v8
-; CHECK-NEXT: vmsleu.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v10, v8
-; CHECK-NEXT: vmsleu.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsleu.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v12, v8
-; CHECK-NEXT: vmsleu.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
106 changes: 62 additions & 44 deletions llvm/test/CodeGen/RISCV/rvv/vmsgt.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v10, v8
-; CHECK-NEXT: vmslt.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmslt.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v12, v8
-; CHECK-NEXT: vmslt.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmslt.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v10, v8
-; CHECK-NEXT: vmslt.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmslt.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v12, v8
-; CHECK-NEXT: vmslt.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmslt.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v10, v8
-; CHECK-NEXT: vmslt.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmslt.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v12, v8
-; CHECK-NEXT: vmslt.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmslt.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v10, v8
-; CHECK-NEXT: vmslt.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmslt.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v12, v8
-; CHECK-NEXT: vmslt.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmslt.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
106 changes: 62 additions & 44 deletions llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v10, v8
-; CHECK-NEXT: vmsltu.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsltu.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsltu.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v12, v8
-; CHECK-NEXT: vmsltu.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsltu.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsltu.vv v0, v9, v8
; CHECK-NEXT: vmsltu.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsltu.vv v0, v10, v8
; CHECK-NEXT: vmsltu.vv v14, v12, v10, v0.t
; CHECK-NEXT: vmsltu.vv v14, v10, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmsltu.vv v8, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsltu.vv v0, v12, v8
; CHECK-NEXT: vmsltu.vv v20, v16, v12, v0.t
; CHECK-NEXT: vmsltu.vv v20, v12, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmsltu.vv v8, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsltu.vv v0, v9, v8
; CHECK-NEXT: vmsltu.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsltu.vv v0, v9, v8
; CHECK-NEXT: vmsltu.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsltu.vv v0, v10, v8
; CHECK-NEXT: vmsltu.vv v14, v12, v10, v0.t
; CHECK-NEXT: vmsltu.vv v14, v10, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmsltu.vv v8, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsltu.vv v0, v12, v8
; CHECK-NEXT: vmsltu.vv v20, v16, v12, v0.t
; CHECK-NEXT: vmsltu.vv v20, v12, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmsltu.vv v8, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsltu.vv v0, v9, v8
; CHECK-NEXT: vmsltu.vv v8, v9, v8
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsltu.vv v0, v10, v8
; CHECK-NEXT: vmsltu.vv v14, v12, v10, v0.t
; CHECK-NEXT: vmsltu.vv v14, v10, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmsltu.vv v8, v12, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsltu.vv v0, v12, v8
; CHECK-NEXT: vmsltu.vv v20, v16, v12, v0.t
; CHECK-NEXT: vmsltu.vv v20, v12, v8
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmsltu.vv v8, v16, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
106 changes: 62 additions & 44 deletions llvm/test/CodeGen/RISCV/rvv/vmsle.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsle.vv v0, v8, v9
; CHECK-NEXT: vmsle.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsle.vv v0, v8, v9
; CHECK-NEXT: vmsle.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsle.vv v0, v8, v9
; CHECK-NEXT: vmsle.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsle.vv v0, v8, v9
; CHECK-NEXT: vmsle.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsle.vv v0, v8, v10
; CHECK-NEXT: vmsle.vv v14, v10, v12, v0.t
; CHECK-NEXT: vmsle.vv v14, v8, v10
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmsle.vv v8, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsle.vv v0, v8, v12
; CHECK-NEXT: vmsle.vv v20, v12, v16, v0.t
; CHECK-NEXT: vmsle.vv v20, v8, v12
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmsle.vv v8, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsle.vv v0, v8, v9
; CHECK-NEXT: vmsle.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmsle.vv v0, v8, v9
; CHECK-NEXT: vmsle.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsle.vv v0, v8, v9
; CHECK-NEXT: vmsle.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsle.vv v0, v8, v10
; CHECK-NEXT: vmsle.vv v14, v10, v12, v0.t
; CHECK-NEXT: vmsle.vv v14, v8, v10
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmsle.vv v8, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsle.vv v0, v8, v12
; CHECK-NEXT: vmsle.vv v20, v12, v16, v0.t
; CHECK-NEXT: vmsle.vv v20, v8, v12
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmsle.vv v8, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsle.vv v0, v8, v9
; CHECK-NEXT: vmsle.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsle.vv v0, v8, v9
; CHECK-NEXT: vmsle.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsle.vv v0, v8, v10
; CHECK-NEXT: vmsle.vv v14, v10, v12, v0.t
; CHECK-NEXT: vmsle.vv v14, v8, v10
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmsle.vv v8, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsle.vv v0, v8, v12
; CHECK-NEXT: vmsle.vv v20, v12, v16, v0.t
; CHECK-NEXT: vmsle.vv v20, v8, v12
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmsle.vv v8, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsle.vv v0, v8, v9
; CHECK-NEXT: vmsle.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsle.vv v0, v8, v10
; CHECK-NEXT: vmsle.vv v14, v10, v12, v0.t
; CHECK-NEXT: vmsle.vv v14, v8, v10
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmsle.vv v8, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsle.vv v0, v8, v12
; CHECK-NEXT: vmsle.vv v20, v12, v16, v0.t
; CHECK-NEXT: vmsle.vv v20, v8, v12
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmsle.vv v8, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
106 changes: 62 additions & 44 deletions llvm/test/CodeGen/RISCV/rvv/vmsleu.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v8, v9
; CHECK-NEXT: vmsleu.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v8, v9
; CHECK-NEXT: vmsleu.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v8, v9
; CHECK-NEXT: vmsleu.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v8, v9
; CHECK-NEXT: vmsleu.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v8, v10
; CHECK-NEXT: vmsleu.vv v14, v10, v12, v0.t
; CHECK-NEXT: vmsleu.vv v14, v8, v10
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmsleu.vv v8, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v8, v12
; CHECK-NEXT: vmsleu.vv v20, v12, v16, v0.t
; CHECK-NEXT: vmsleu.vv v20, v8, v12
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmsleu.vv v8, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v8, v9
; CHECK-NEXT: vmsleu.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v8, v9
; CHECK-NEXT: vmsleu.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v8, v9
; CHECK-NEXT: vmsleu.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v8, v10
; CHECK-NEXT: vmsleu.vv v14, v10, v12, v0.t
; CHECK-NEXT: vmsleu.vv v14, v8, v10
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmsleu.vv v8, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v8, v12
; CHECK-NEXT: vmsleu.vv v20, v12, v16, v0.t
; CHECK-NEXT: vmsleu.vv v20, v8, v12
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmsleu.vv v8, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v8, v9
; CHECK-NEXT: vmsleu.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v8, v9
; CHECK-NEXT: vmsleu.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v8, v10
; CHECK-NEXT: vmsleu.vv v14, v10, v12, v0.t
; CHECK-NEXT: vmsleu.vv v14, v8, v10
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmsleu.vv v8, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v8, v12
; CHECK-NEXT: vmsleu.vv v20, v12, v16, v0.t
; CHECK-NEXT: vmsleu.vv v20, v8, v12
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmsleu.vv v8, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v8, v9
; CHECK-NEXT: vmsleu.vv v8, v8, v9
; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v8, v10
; CHECK-NEXT: vmsleu.vv v14, v10, v12, v0.t
; CHECK-NEXT: vmsleu.vv v14, v8, v10
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
; CHECK-NEXT: vmsleu.vv v8, v10, v12, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsleu.vv v0, v8, v12
; CHECK-NEXT: vmsleu.vv v20, v12, v16, v0.t
; CHECK-NEXT: vmsleu.vv v20, v8, v12
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vmsleu.vv v8, v12, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(