58 changes: 32 additions & 26 deletions llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -2176,10 +2176,7 @@ define <vscale x 16 x i8> @mgather_baseidx_nxv16i8(ptr %base, <vscale x 16 x i8>
;
; RV64-LABEL: mgather_baseidx_nxv16i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: srli a1, a1, 3
; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
@@ -2188,6 +2185,11 @@ define <vscale x 16 x i8> @mgather_baseidx_nxv16i8(ptr %base, <vscale x 16 x i8>
; RV64-NEXT: vsext.vf8 v16, v9
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v11, (a0), v16, v0.t
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT: vmv2r.v v8, v10
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i8, ptr %base, <vscale x 16 x i8> %idxs
@@ -2200,45 +2202,49 @@ declare <vscale x 32 x i8> @llvm.masked.gather.nxv32i8.nxv32p0(<vscale x 32 x pt
define <vscale x 32 x i8> @mgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8> %idxs, <vscale x 32 x i1> %m, <vscale x 32 x i8> %passthru) {
; RV32-LABEL: mgather_baseidx_nxv32i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv1r.v v16, v0
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: srli a1, a1, 2
; RV32-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
; RV32-NEXT: vslidedown.vx v0, v0, a1
; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf4 v16, v10
; RV32-NEXT: vsext.vf4 v24, v10
; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; RV32-NEXT: vluxei32.v v14, (a0), v24, v0.t
; RV32-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf4 v24, v8
; RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; RV32-NEXT: vluxei32.v v14, (a0), v16, v0.t
; RV32-NEXT: vmv1r.v v0, v16
; RV32-NEXT: vluxei32.v v12, (a0), v24, v0.t
; RV32-NEXT: vmv4r.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: mgather_baseidx_nxv32i8:
; RV64: # %bb.0:
; RV64-NEXT: vmv1r.v v16, v0
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v24, v8
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v12, (a0), v24, v0.t
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: srli a2, a1, 3
; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a2
; RV64-NEXT: vsetvli a3, zero, e64, m8, ta, ma
; RV64-NEXT: srli a2, a1, 2
; RV64-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; RV64-NEXT: vslidedown.vx v17, v0, a2
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v24, v10
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v17
; RV64-NEXT: vluxei64.v v14, (a0), v24, v0.t
; RV64-NEXT: srli a1, a1, 3
; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v16, a1
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v24, v9
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v13, (a0), v24, v0.t
; RV64-NEXT: srli a1, a1, 2
; RV64-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; RV64-NEXT: vslidedown.vx v0, v16, a1
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v10
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v24, v8
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v14, (a0), v16, v0.t
; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a2
; RV64-NEXT: vmv1r.v v0, v16
; RV64-NEXT: vluxei64.v v12, (a0), v24, v0.t
; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v17, a1
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v11
; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
@@ -905,8 +905,8 @@ define void @test_dag_loop() {
; CHECK-NEXT: vsetivli zero, 1, e16, m8, ta, ma
; CHECK-NEXT: vle16.v v8, (zero)
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vmclr.m v0
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vmclr.m v0
; CHECK-NEXT: vsetivli zero, 0, e8, m4, tu, mu
; CHECK-NEXT: vmv4r.v v20, v16
; CHECK-NEXT: vssubu.vx v20, v16, zero, v0.t
64 changes: 32 additions & 32 deletions llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
@@ -234,8 +234,8 @@ define <vscale x 1 x i1> @fcmp_ord_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfmv.v.f v9, fa0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmfeq.vf v9, v9, fa0, v0.t
; CHECK-NEXT: vmfeq.vv v8, v8, v8, v0.t
; CHECK-NEXT: vmfeq.vf v9, v9, fa0, v0.t
; CHECK-NEXT: vmand.mm v0, v8, v9
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
@@ -250,8 +250,8 @@ define <vscale x 1 x i1> @fcmp_ord_vf_swap_nxv1f16(<vscale x 1 x half> %va, half
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfmv.v.f v9, fa0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmfeq.vf v9, v9, fa0, v0.t
; CHECK-NEXT: vmfeq.vv v8, v8, v8, v0.t
; CHECK-NEXT: vmfeq.vf v9, v9, fa0, v0.t
; CHECK-NEXT: vmand.mm v0, v9, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
@@ -500,8 +500,8 @@ define <vscale x 1 x i1> @fcmp_uno_vf_nxv1f16(<vscale x 1 x half> %va, half %b,
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfmv.v.f v9, fa0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmfne.vf v9, v9, fa0, v0.t
; CHECK-NEXT: vmfne.vv v8, v8, v8, v0.t
; CHECK-NEXT: vmfne.vf v9, v9, fa0, v0.t
; CHECK-NEXT: vmor.mm v0, v8, v9
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
@@ -516,8 +516,8 @@ define <vscale x 1 x i1> @fcmp_uno_vf_swap_nxv1f16(<vscale x 1 x half> %va, half
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfmv.v.f v9, fa0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmfne.vf v9, v9, fa0, v0.t
; CHECK-NEXT: vmfne.vv v8, v8, v8, v0.t
; CHECK-NEXT: vmfne.vf v9, v9, fa0, v0.t
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
@@ -783,9 +783,9 @@ define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfmv.v.f v10, fa0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmfeq.vf v12, v10, fa0, v0.t
; CHECK-NEXT: vmfeq.vv v10, v8, v8, v0.t
; CHECK-NEXT: vmand.mm v0, v10, v12
; CHECK-NEXT: vmfeq.vv v12, v8, v8, v0.t
; CHECK-NEXT: vmfeq.vf v8, v10, fa0, v0.t
; CHECK-NEXT: vmand.mm v0, v12, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -799,9 +799,9 @@ define <vscale x 8 x i1> @fcmp_ord_vf_swap_nxv8f16(<vscale x 8 x half> %va, half
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfmv.v.f v10, fa0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmfeq.vf v12, v10, fa0, v0.t
; CHECK-NEXT: vmfeq.vv v10, v8, v8, v0.t
; CHECK-NEXT: vmand.mm v0, v12, v10
; CHECK-NEXT: vmfeq.vv v12, v8, v8, v0.t
; CHECK-NEXT: vmfeq.vf v8, v10, fa0, v0.t
; CHECK-NEXT: vmand.mm v0, v8, v12
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -1052,9 +1052,9 @@ define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f16(<vscale x 8 x half> %va, half %b,
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfmv.v.f v10, fa0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmfne.vf v12, v10, fa0, v0.t
; CHECK-NEXT: vmfne.vv v10, v8, v8, v0.t
; CHECK-NEXT: vmor.mm v0, v10, v12
; CHECK-NEXT: vmfne.vv v12, v8, v8, v0.t
; CHECK-NEXT: vmfne.vf v8, v10, fa0, v0.t
; CHECK-NEXT: vmor.mm v0, v12, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -1068,9 +1068,9 @@ define <vscale x 8 x i1> @fcmp_uno_vf_swap_nxv8f16(<vscale x 8 x half> %va, half
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vfmv.v.f v10, fa0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmfne.vf v12, v10, fa0, v0.t
; CHECK-NEXT: vmfne.vv v10, v8, v8, v0.t
; CHECK-NEXT: vmor.mm v0, v12, v10
; CHECK-NEXT: vmfne.vv v12, v8, v8, v0.t
; CHECK-NEXT: vmfne.vf v8, v10, fa0, v0.t
; CHECK-NEXT: vmor.mm v0, v8, v12
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
@@ -1369,8 +1369,8 @@ define <vscale x 1 x i1> @fcmp_ord_vf_nxv1f64(<vscale x 1 x double> %va, double
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT: vfmv.v.f v9, fa0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfeq.vf v9, v9, fa0, v0.t
; CHECK-NEXT: vmfeq.vv v8, v8, v8, v0.t
; CHECK-NEXT: vmfeq.vf v9, v9, fa0, v0.t
; CHECK-NEXT: vmand.mm v0, v8, v9
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
@@ -1385,8 +1385,8 @@ define <vscale x 1 x i1> @fcmp_ord_vf_swap_nxv1f64(<vscale x 1 x double> %va, do
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT: vfmv.v.f v9, fa0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfeq.vf v9, v9, fa0, v0.t
; CHECK-NEXT: vmfeq.vv v8, v8, v8, v0.t
; CHECK-NEXT: vmfeq.vf v9, v9, fa0, v0.t
; CHECK-NEXT: vmand.mm v0, v9, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
@@ -1635,8 +1635,8 @@ define <vscale x 1 x i1> @fcmp_uno_vf_nxv1f64(<vscale x 1 x double> %va, double
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT: vfmv.v.f v9, fa0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfne.vf v9, v9, fa0, v0.t
; CHECK-NEXT: vmfne.vv v8, v8, v8, v0.t
; CHECK-NEXT: vmfne.vf v9, v9, fa0, v0.t
; CHECK-NEXT: vmor.mm v0, v8, v9
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
@@ -1651,8 +1651,8 @@ define <vscale x 1 x i1> @fcmp_uno_vf_swap_nxv1f64(<vscale x 1 x double> %va, do
; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT: vfmv.v.f v9, fa0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmfne.vf v9, v9, fa0, v0.t
; CHECK-NEXT: vmfne.vv v8, v8, v8, v0.t
; CHECK-NEXT: vmfne.vf v9, v9, fa0, v0.t
; CHECK-NEXT: vmor.mm v0, v9, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
@@ -1919,9 +1919,9 @@ define <vscale x 8 x i1> @fcmp_ord_vf_nxv8f64(<vscale x 8 x double> %va, double
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT: vfmv.v.f v16, fa0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vf v24, v16, fa0, v0.t
; CHECK-NEXT: vmfeq.vv v16, v8, v8, v0.t
; CHECK-NEXT: vmand.mm v0, v16, v24
; CHECK-NEXT: vmfeq.vv v24, v8, v8, v0.t
; CHECK-NEXT: vmfeq.vf v8, v16, fa0, v0.t
; CHECK-NEXT: vmand.mm v0, v24, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@@ -1935,9 +1935,9 @@ define <vscale x 8 x i1> @fcmp_ord_vf_swap_nxv8f64(<vscale x 8 x double> %va, do
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT: vfmv.v.f v16, fa0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vf v24, v16, fa0, v0.t
; CHECK-NEXT: vmfeq.vv v16, v8, v8, v0.t
; CHECK-NEXT: vmand.mm v0, v24, v16
; CHECK-NEXT: vmfeq.vv v24, v8, v8, v0.t
; CHECK-NEXT: vmfeq.vf v8, v16, fa0, v0.t
; CHECK-NEXT: vmand.mm v0, v8, v24
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@@ -2188,9 +2188,9 @@ define <vscale x 8 x i1> @fcmp_uno_vf_nxv8f64(<vscale x 8 x double> %va, double
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT: vfmv.v.f v16, fa0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfne.vf v24, v16, fa0, v0.t
; CHECK-NEXT: vmfne.vv v16, v8, v8, v0.t
; CHECK-NEXT: vmor.mm v0, v16, v24
; CHECK-NEXT: vmfne.vv v24, v8, v8, v0.t
; CHECK-NEXT: vmfne.vf v8, v16, fa0, v0.t
; CHECK-NEXT: vmor.mm v0, v24, v8
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
@@ -2204,9 +2204,9 @@ define <vscale x 8 x i1> @fcmp_uno_vf_swap_nxv8f64(<vscale x 8 x double> %va, do
; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT: vfmv.v.f v16, fa0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfne.vf v24, v16, fa0, v0.t
; CHECK-NEXT: vmfne.vv v16, v8, v8, v0.t
; CHECK-NEXT: vmor.mm v0, v24, v16
; CHECK-NEXT: vmfne.vv v24, v8, v8, v0.t
; CHECK-NEXT: vmfne.vf v8, v16, fa0, v0.t
; CHECK-NEXT: vmor.mm v0, v8, v24
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
%vb = shufflevector <vscale x 8 x double> %elt.head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
28 changes: 13 additions & 15 deletions llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll
@@ -78,11 +78,11 @@ define <16 x i8> @v8i8_2(<8 x i8> %a, <8 x i8> %b) {
; CHECK-NEXT: vid.v v11
; CHECK-NEXT: vrsub.vi v12, v11, 15
; CHECK-NEXT: vrgather.vv v10, v8, v12
; CHECK-NEXT: vrsub.vi v8, v11, 7
; CHECK-NEXT: li a0, 255
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v0, a0
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vrsub.vi v8, v11, 7
; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
@@ -224,11 +224,11 @@ define <16 x i16> @v8i16_2(<8 x i16> %a, <8 x i16> %b) {
; CHECK-NEXT: vid.v v14
; CHECK-NEXT: vrsub.vi v16, v14, 15
; CHECK-NEXT: vrgather.vv v10, v8, v16
; CHECK-NEXT: vrsub.vi v8, v14, 7
; CHECK-NEXT: li a0, 255
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v0, a0
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vrsub.vi v8, v14, 7
; CHECK-NEXT: vrgather.vv v10, v12, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
@@ -341,10 +341,10 @@ define <8 x i32> @v4i32_2(<4 x i32> %a, <4 x i32> %b) {
; CHECK-NEXT: vid.v v14
; CHECK-NEXT: vrsub.vi v16, v14, 7
; CHECK-NEXT: vrgather.vv v10, v8, v16
; CHECK-NEXT: vrsub.vi v8, v14, 3
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.i v0, 15
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vrsub.vi v8, v14, 3
; CHECK-NEXT: vrgather.vv v10, v12, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
@@ -373,11 +373,11 @@ define <16 x i32> @v8i32_2(<8 x i32> %a, <8 x i32> %b) {
; CHECK-NEXT: vid.v v20
; CHECK-NEXT: vrsub.vi v24, v20, 15
; CHECK-NEXT: vrgather.vv v12, v8, v24
; CHECK-NEXT: vrsub.vi v8, v20, 7
; CHECK-NEXT: li a0, 255
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v0, a0
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vrsub.vi v8, v20, 7
; CHECK-NEXT: vrgather.vv v12, v16, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
@@ -501,10 +501,9 @@ define <8 x i64> @v4i64_2(<4 x i64> %a, <4 x i64> %b) {
; RV32-NEXT: vrsub.vi v19, v18, 7
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; RV32-NEXT: vrgatherei16.vv v12, v8, v19
; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; RV32-NEXT: vmv.v.i v0, 15
; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT: vrsub.vi v8, v18, 3
; RV32-NEXT: vmv.v.i v0, 15
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vrgatherei16.vv v12, v16, v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -517,10 +516,10 @@ define <8 x i64> @v4i64_2(<4 x i64> %a, <4 x i64> %b) {
; RV64-NEXT: vid.v v20
; RV64-NEXT: vrsub.vi v24, v20, 7
; RV64-NEXT: vrgather.vv v12, v8, v24
; RV64-NEXT: vrsub.vi v8, v20, 3
; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; RV64-NEXT: vmv.v.i v0, 15
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vrsub.vi v8, v20, 3
; RV64-NEXT: vrgather.vv v12, v16, v8, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
@@ -605,11 +604,11 @@ define <16 x half> @v8f16_2(<8 x half> %a, <8 x half> %b) {
; CHECK-NEXT: vid.v v14
; CHECK-NEXT: vrsub.vi v16, v14, 15
; CHECK-NEXT: vrgather.vv v10, v8, v16
; CHECK-NEXT: vrsub.vi v8, v14, 7
; CHECK-NEXT: li a0, 255
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v0, a0
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT: vrsub.vi v8, v14, 7
; CHECK-NEXT: vrgather.vv v10, v12, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
@@ -693,10 +692,10 @@ define <8 x float> @v4f32_2(<4 x float> %a, <4 x float> %b) {
; CHECK-NEXT: vid.v v14
; CHECK-NEXT: vrsub.vi v16, v14, 7
; CHECK-NEXT: vrgather.vv v10, v8, v16
; CHECK-NEXT: vrsub.vi v8, v14, 3
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.i v0, 15
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vrsub.vi v8, v14, 3
; CHECK-NEXT: vrgather.vv v10, v12, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
@@ -725,11 +724,11 @@ define <16 x float> @v8f32_2(<8 x float> %a, <8 x float> %b) {
; CHECK-NEXT: vid.v v20
; CHECK-NEXT: vrsub.vi v24, v20, 15
; CHECK-NEXT: vrgather.vv v12, v8, v24
; CHECK-NEXT: vrsub.vi v8, v20, 7
; CHECK-NEXT: li a0, 255
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT: vmv.v.x v0, a0
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT: vrsub.vi v8, v20, 7
; CHECK-NEXT: vrgather.vv v12, v16, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
@@ -796,10 +795,9 @@ define <8 x double> @v4f64_2(<4 x double> %a, <4 x double> %b) {
; RV32-NEXT: vrsub.vi v19, v18, 7
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; RV32-NEXT: vrgatherei16.vv v12, v8, v19
; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; RV32-NEXT: vmv.v.i v0, 15
; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV32-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT: vrsub.vi v8, v18, 3
; RV32-NEXT: vmv.v.i v0, 15
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vrgatherei16.vv v12, v16, v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -812,10 +810,10 @@ define <8 x double> @v4f64_2(<4 x double> %a, <4 x double> %b) {
; RV64-NEXT: vid.v v20
; RV64-NEXT: vrsub.vi v24, v20, 7
; RV64-NEXT: vrgather.vv v12, v8, v24
; RV64-NEXT: vrsub.vi v8, v20, 3
; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; RV64-NEXT: vmv.v.i v0, 15
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vrsub.vi v8, v20, 3
; RV64-NEXT: vrgather.vv v12, v16, v8, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
90 changes: 45 additions & 45 deletions llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll
@@ -9,18 +9,18 @@ declare <16 x i8> @llvm.sshl.sat.v16i8(<16 x i8>, <16 x i8>)
define <2 x i64> @vec_v2i64(<2 x i64> %x, <2 x i64> %y) nounwind {
; CHECK-LABEL: vec_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: li a0, -1
; CHECK-NEXT: srli a1, a0, 1
; CHECK-NEXT: vsll.vv v10, v8, v9
; CHECK-NEXT: vsra.vv v9, v10, v9
; CHECK-NEXT: vmsne.vv v8, v8, v9
; CHECK-NEXT: vmv.v.x v9, a1
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vmv.v.x v10, a1
; CHECK-NEXT: vsll.vv v11, v8, v9
; CHECK-NEXT: vsra.vv v9, v11, v9
; CHECK-NEXT: vmsne.vv v9, v8, v9
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: slli a0, a0, 63
; CHECK-NEXT: vmerge.vxm v9, v9, a0, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmerge.vvm v8, v10, v9, v0
; CHECK-NEXT: vmerge.vxm v8, v10, a0, v0
; CHECK-NEXT: vmv.v.v v0, v9
; CHECK-NEXT: vmerge.vvm v8, v11, v8, v0
; CHECK-NEXT: ret
%tmp = call <2 x i64> @llvm.sshl.sat.v2i64(<2 x i64> %x, <2 x i64> %y)
ret <2 x i64> %tmp
@@ -29,19 +29,19 @@ define <2 x i64> @vec_v2i64(<2 x i64> %x, <2 x i64> %y) nounwind {
define <4 x i32> @vec_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; CHECK-LABEL: vec_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: lui a0, 524288
; CHECK-NEXT: addiw a0, a0, -1
; CHECK-NEXT: vsll.vv v10, v8, v9
; CHECK-NEXT: vsra.vv v9, v10, v9
; CHECK-NEXT: vmsne.vv v8, v8, v9
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
; CHECK-NEXT: vsll.vv v11, v8, v9
; CHECK-NEXT: vsra.vv v9, v11, v9
; CHECK-NEXT: vmsne.vv v9, v8, v9
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: li a0, 1
; CHECK-NEXT: slli a0, a0, 31
; CHECK-NEXT: vmerge.vxm v9, v9, a0, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmerge.vvm v8, v10, v9, v0
; CHECK-NEXT: vmerge.vxm v8, v10, a0, v0
; CHECK-NEXT: vmv.v.v v0, v9
; CHECK-NEXT: vmerge.vvm v8, v11, v8, v0
; CHECK-NEXT: ret
%tmp = call <4 x i32> @llvm.sshl.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
ret <4 x i32> %tmp
@@ -50,10 +50,10 @@ define <4 x i32> @vec_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
define <8 x i16> @vec_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
; CHECK-LABEL: vec_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: lui a0, 8
; CHECK-NEXT: addiw a1, a0, -1
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: vsll.vv v10, v8, v9
; CHECK-NEXT: vsra.vv v9, v10, v9
; CHECK-NEXT: vmsne.vv v8, v8, v9
@@ -69,17 +69,17 @@ define <8 x i16> @vec_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
define <16 x i8> @vec_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
; CHECK-LABEL: vec_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 127
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
; CHECK-NEXT: vsll.vv v11, v8, v9
; CHECK-NEXT: vsra.vv v9, v11, v9
; CHECK-NEXT: vmsne.vv v9, v8, v9
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: li a0, 127
; CHECK-NEXT: vsll.vv v10, v8, v9
; CHECK-NEXT: vsra.vv v9, v10, v9
; CHECK-NEXT: vmsne.vv v8, v8, v9
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: li a0, 128
; CHECK-NEXT: vmerge.vxm v9, v9, a0, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmerge.vvm v8, v10, v9, v0
; CHECK-NEXT: vmerge.vxm v8, v10, a0, v0
; CHECK-NEXT: vmv.v.v v0, v9
; CHECK-NEXT: vmerge.vvm v8, v11, v8, v0
; CHECK-NEXT: ret
%tmp = call <16 x i8> @llvm.sshl.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
ret <16 x i8> %tmp
@@ -94,15 +94,15 @@ define <vscale x 2 x i64> @vec_nxv2i64(<vscale x 2 x i64> %x, <vscale x 2 x i64>
; CHECK-LABEL: vec_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: li a0, -1
; CHECK-NEXT: srli a1, a0, 1
; CHECK-NEXT: vsll.vv v12, v8, v10
; CHECK-NEXT: vsra.vv v14, v12, v10
; CHECK-NEXT: vmsne.vv v10, v8, v14
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: li a0, -1
; CHECK-NEXT: srli a1, a0, 1
; CHECK-NEXT: vmv.v.x v14, a1
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: slli a0, a0, 63
; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT: vmerge.vxm v8, v14, a0, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT: ret
@@ -114,16 +114,16 @@ define <vscale x 4 x i32> @vec_nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32>
; CHECK-LABEL: vec_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: lui a0, 524288
; CHECK-NEXT: addiw a0, a0, -1
; CHECK-NEXT: vsll.vv v12, v8, v10
; CHECK-NEXT: vsra.vv v14, v12, v10
; CHECK-NEXT: vmsne.vv v10, v8, v14
; CHECK-NEXT: vmv.v.x v8, a0
; CHECK-NEXT: lui a0, 524288
; CHECK-NEXT: addiw a0, a0, -1
; CHECK-NEXT: vmv.v.x v14, a0
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: li a0, 1
; CHECK-NEXT: slli a0, a0, 31
; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT: vmerge.vxm v8, v14, a0, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT: ret
@@ -135,12 +135,12 @@ define <vscale x 8 x i16> @vec_nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16>
; CHECK-LABEL: vec_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: lui a0, 8
; CHECK-NEXT: addiw a1, a0, -1
; CHECK-NEXT: vsll.vv v12, v8, v10
; CHECK-NEXT: vsra.vv v14, v12, v10
; CHECK-NEXT: vmsne.vv v10, v8, v14
; CHECK-NEXT: lui a0, 8
; CHECK-NEXT: addiw a1, a0, -1
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT: vmv1r.v v0, v10
@@ -154,14 +154,14 @@ define <vscale x 16 x i8> @vec_nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8>
; CHECK-LABEL: vec_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: li a0, 127
; CHECK-NEXT: vsll.vv v12, v8, v10
; CHECK-NEXT: vsra.vv v14, v12, v10
; CHECK-NEXT: vmsne.vv v10, v8, v14
; CHECK-NEXT: vmv.v.x v8, a0
; CHECK-NEXT: li a0, 127
; CHECK-NEXT: vmv.v.x v14, a0
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: li a0, 128
; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT: vmerge.vxm v8, v14, a0, v0
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
; CHECK-NEXT: ret
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
@@ -17,12 +17,12 @@ define {<16 x i1>, <16 x i1>} @vector_deinterleave_v16i1_v32i1(<32 x i1> %vec) {
; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV32-NEXT: vmerge.vim v8, v8, 1, v0
; RV32-NEXT: vadd.vi v12, v11, -16
; RV32-NEXT: lui a0, 16
; RV32-NEXT: addi a0, a0, -256
; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; RV32-NEXT: vmv.v.x v0, a0
; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; RV32-NEXT: vadd.vi v12, v11, -16
; RV32-NEXT: vrgather.vv v9, v8, v12, v0.t
; RV32-NEXT: vmsne.vi v9, v9, 0
; RV32-NEXT: vadd.vi v12, v11, 1
@@ -45,12 +45,12 @@ define {<16 x i1>, <16 x i1>} @vector_deinterleave_v16i1_v32i1(<32 x i1> %vec) {
; RV64-NEXT: vslidedown.vi v0, v0, 2
; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64-NEXT: vmerge.vim v8, v8, 1, v0
; RV64-NEXT: vadd.vi v12, v11, -16
; RV64-NEXT: lui a0, 16
; RV64-NEXT: addiw a0, a0, -256
; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; RV64-NEXT: vmv.v.x v0, a0
; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; RV64-NEXT: vadd.vi v12, v11, -16
; RV64-NEXT: vrgather.vv v9, v8, v12, v0.t
; RV64-NEXT: vmsne.vi v9, v9, 0
; RV64-NEXT: vadd.vi v12, v11, 1
7 changes: 4 additions & 3 deletions llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
@@ -7,12 +7,13 @@
define <32 x i1> @vector_interleave_v32i1_v16i1(<16 x i1> %a, <16 x i1> %b) {
; CHECK-LABEL: vector_interleave_v32i1_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vslideup.vi v0, v8, 2
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
; CHECK-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v8, 16
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
18 changes: 9 additions & 9 deletions llvm/test/CodeGen/RISCV/rvv/vmv-copy.mir
@@ -31,7 +31,7 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: $v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype
; CHECK-NEXT: $v12m4 = PseudoVMV_V_V_M4 $v28m4, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype
; CHECK-NEXT: $v12m4 = PseudoVMV_V_V_M4 undef $v12m4, $v28m4, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
$x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype
$v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype
$v12m4 = COPY $v28m4
@@ -47,10 +47,10 @@ body: |
; CHECK: liveins: $x14
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: $v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype
; CHECK-NEXT: $v12m4 = PseudoVMV_V_I_M4 0, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype
; CHECK-NEXT: $v28m4 = PseudoVMV_V_I_M4 undef $v28m4, 0, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: $v12m4 = PseudoVMV_V_I_M4 undef $v12m4, 0, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
$x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype
$v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5, implicit $vl, implicit $vtype
$v28m4 = PseudoVMV_V_I_M4 undef $v28m4, 0, $noreg, 5, 0, implicit $vl, implicit $vtype
$v12m4 = COPY $v28m4
...
---
@@ -81,11 +81,11 @@ body: |
; CHECK: liveins: $x14, $x16
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: $v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype
; CHECK-NEXT: $v28m4 = PseudoVMV_V_I_M4 undef $v28m4, 0, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: $v4m4, $x0 = PseudoVLE32FF_V_M4 $x16, $noreg, 5 /* e32 */, implicit-def $vl
; CHECK-NEXT: $v12m4 = VMV4R_V $v28m4
$x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype
$v28m4 = PseudoVMV_V_I_M4 0, $noreg, 5, implicit $vl, implicit $vtype
$v28m4 = PseudoVMV_V_I_M4 undef $v28m4, 0, $noreg, 5, 0, implicit $vl, implicit $vtype
$v4m4,$x0 = PseudoVLE32FF_V_M4 $x16, $noreg, 5, implicit-def $vl
$v12m4 = COPY $v28m4
...
@@ -132,7 +132,7 @@ body: |
; CHECK-NEXT: $v0m2 = PseudoVLE32_V_M2 $x18, $noreg, 4 /* e16 */, implicit $vl, implicit $vtype
; CHECK-NEXT: $x0 = PseudoVSETVLIX0 $x0, 82 /* e32, m4, ta, mu */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: $v4m4 = PseudoVLE32_V_M4 killed $x18, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype
; CHECK-NEXT: $v12m4 = PseudoVMV_V_V_M4 $v28m4, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype
; CHECK-NEXT: $v12m4 = PseudoVMV_V_V_M4 undef $v12m4, $v28m4, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
$x15 = PseudoVSETVLI $x14, 82, implicit-def $vl, implicit-def $vtype
$v28m4 = PseudoVLE32_V_M4 killed $x16, $noreg, 5, implicit $vl, implicit $vtype
$x0 = PseudoVSETVLIX0 $x0, 73, implicit-def $vl, implicit-def $vtype
@@ -253,8 +253,8 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: $x15 = PseudoVSETVLI $x14, 80 /* e32, m1, ta, mu */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: $v8_v9 = PseudoVLSEG2E32_V_M1 killed $x16, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype
; CHECK-NEXT: $v10 = PseudoVMV_V_V_M1 $v8, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype
; CHECK-NEXT: $v11 = PseudoVMV_V_V_M1 $v9, $noreg, 5 /* e32 */, implicit $vl, implicit $vtype
; CHECK-NEXT: $v10 = PseudoVMV_V_V_M1 undef $v10, $v8, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: $v11 = PseudoVMV_V_V_M1 undef $v11, $v9, $noreg, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
$x15 = PseudoVSETVLI $x14, 80, implicit-def $vl, implicit-def $vtype
$v8_v9 = PseudoVLSEG2E32_V_M1 killed $x16, $noreg, 5, implicit $vl, implicit $vtype
$v10_v11 = COPY $v8_v9
46 changes: 22 additions & 24 deletions llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
@@ -978,14 +978,14 @@ declare half @llvm.vector.reduce.fadd.nxv3f16(half, <vscale x 3 x half>)
define half @vreduce_ord_fadd_nxv3f16(<vscale x 3 x half> %v, half %s) {
; CHECK-LABEL: vreduce_ord_fadd_nxv3f16:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048568
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a1, a1, a0
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: lui a2, 1048568
; CHECK-NEXT: vsetvli a3, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a2
; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
@@ -1002,16 +1002,16 @@ declare half @llvm.vector.reduce.fadd.nxv6f16(half, <vscale x 6 x half>)
define half @vreduce_ord_fadd_nxv6f16(<vscale x 6 x half> %v, half %s) {
; CHECK-LABEL: vreduce_ord_fadd_nxv6f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: lui a0, 1048568
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
; CHECK-NEXT: vmv.v.x v11, a0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a0
; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT: vslideup.vx v9, v10, a0
; CHECK-NEXT: vslideup.vx v9, v11, a0
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: vfredosum.vs v8, v8, v10
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
Expand All @@ -1029,13 +1029,12 @@ define half @vreduce_ord_fadd_nxv10f16(<vscale x 10 x half> %v, half %s) {
; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a0
; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT: vslideup.vx v10, v12, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT: vmv.v.v v11, v12
; CHECK-NEXT: add a1, a0, a0
; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT: vslideup.vx v11, v12, a0
; CHECK-NEXT: vslideup.vx v10, v12, a0
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: vfredosum.vs v8, v8, v12
@@ -1066,14 +1065,14 @@ define half @vreduce_ord_fadd_nxv12f16(<vscale x 12 x half> %v, half %s) {
define half @vreduce_fadd_nxv3f16(<vscale x 3 x half> %v, half %s) {
; CHECK-LABEL: vreduce_fadd_nxv3f16:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, 1048568
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: slli a1, a0, 1
; CHECK-NEXT: add a1, a1, a0
; CHECK-NEXT: add a0, a1, a0
; CHECK-NEXT: lui a2, 1048568
; CHECK-NEXT: vsetvli a3, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v9, a2
; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT: vslideup.vx v8, v9, a1
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
@@ -1088,16 +1087,16 @@ define half @vreduce_fadd_nxv3f16(<vscale x 3 x half> %v, half %s) {
define half @vreduce_fadd_nxv6f16(<vscale x 6 x half> %v, half %s) {
; CHECK-LABEL: vreduce_fadd_nxv6f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: lui a0, 1048568
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.x v10, a0
; CHECK-NEXT: vmv.v.x v11, a0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a0
; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT: vslideup.vx v9, v10, a0
; CHECK-NEXT: vslideup.vx v9, v11, a0
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: vfredusum.vs v8, v8, v10
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
@@ -1110,19 +1109,18 @@ declare half @llvm.vector.reduce.fmin.nxv10f16(<vscale x 10 x half>)
define half @vreduce_fmin_nxv10f16(<vscale x 10 x half> %v) {
; CHECK-LABEL: vreduce_fmin_nxv10f16:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI73_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI73_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vfmv.v.f v12, fa5
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: lui a1, %hi(.LCPI73_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI73_0)(a1)
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a0
; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT: vfmv.v.f v12, fa5
; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT: vslideup.vx v10, v12, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT: vmv.v.v v11, v12
; CHECK-NEXT: add a1, a0, a0
; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT: vslideup.vx v11, v12, a0
; CHECK-NEXT: vslideup.vx v10, v12, a0
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT: vfmv.s.f v12, fa5
; CHECK-NEXT: vfredmin.vs v8, v8, v12
24 changes: 16 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
@@ -284,15 +284,17 @@ body: |
; CHECK-NEXT: bb.1.if.then:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %dead1:vr = IMPLICIT_DEF
; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
; CHECK-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype
; CHECK-NEXT: early-clobber %1:vr = PseudoVZEXT_VF2_M1 %dead1, [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: PseudoBR %bb.3
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.2.if.else:
; CHECK-NEXT: successors: %bb.3(0x80000000)
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: %dead2:vr = IMPLICIT_DEF
; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
; CHECK-NEXT: early-clobber %2:vr = PseudoVSEXT_VF2_M1 [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype
; CHECK-NEXT: early-clobber %2:vr = PseudoVSEXT_VF2_M1 %dead2, [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.3.if.end:
; CHECK-NEXT: [[PHI:%[0-9]+]]:vr = PHI %1, %bb.1, %2, %bb.2
Expand All @@ -312,11 +314,13 @@ body: |
PseudoBR %bb.1
bb.1.if.then:
early-clobber %1:vr = PseudoVZEXT_VF2_M1 %0, %7, 6
%dead1:vr = IMPLICIT_DEF
early-clobber %1:vr = PseudoVZEXT_VF2_M1 %dead1, %0, %7, 6, 0
PseudoBR %bb.3
bb.2.if.else:
early-clobber %2:vr = PseudoVSEXT_VF2_M1 %0, %7, 6
%dead2:vr = IMPLICIT_DEF
early-clobber %2:vr = PseudoVSEXT_VF2_M1 %dead2, %0, %7, 6, 0
bb.3.if.end:
%3:vr = PHI %1, %bb.1, %2, %bb.2
@@ -510,8 +514,9 @@ body: |
; CHECK-NEXT: %pt:vr = IMPLICIT_DEF
; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 $x0, 223 /* e64, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: [[PseudoVID_V_MF2_:%[0-9]+]]:vr = PseudoVID_V_MF2 %pt, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: %pt2:vr = IMPLICIT_DEF
; CHECK-NEXT: dead [[PseudoVSETVLIX0_1:%[0-9]+]]:gpr = PseudoVSETVLIX0 $x0, 215 /* e32, mf2, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vrnov0 = PseudoVMV_V_I_MF2 0, -1, 5 /* e32 */, implicit $vl, implicit $vtype
; CHECK-NEXT: [[PseudoVMV_V_I_MF2_:%[0-9]+]]:vrnov0 = PseudoVMV_V_I_MF2 %pt2, 0, -1, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
; CHECK-NEXT: successors: %bb.2(0x40000000), %bb.3(0x40000000)
@@ -546,7 +551,8 @@ body: |
%2:gpr = IMPLICIT_DEF
%pt:vr = IMPLICIT_DEF
%3:vr = PseudoVID_V_MF2 %pt, -1, 6, 0
%4:vrnov0 = PseudoVMV_V_I_MF2 0, -1, 5
%pt2:vr = IMPLICIT_DEF
%4:vrnov0 = PseudoVMV_V_I_MF2 %pt2, 0, -1, 5, 0
bb.1:
successors: %bb.2(0x40000000), %bb.3(0x40000000)
@@ -761,8 +767,9 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: %dead:vr = IMPLICIT_DEF
; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 4, 208 /* e32, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, 4, 5 /* e32 */, implicit $vl, implicit $vtype
; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 %dead, 0, 4, 5 /* e32 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY [[PseudoVMV_V_I_M1_]]
; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY [[COPY2]]
; CHECK-NEXT: [[LUI:%[0-9]+]]:gpr = LUI 1
@@ -796,7 +803,8 @@ body: |
%8:gpr = COPY $x12
%6:gpr = COPY $x10
%11:vr = PseudoVMV_V_I_M1 0, 4, 5
%dead:vr = IMPLICIT_DEF
%11:vr = PseudoVMV_V_I_M1 %dead, 0, 4, 5, 0
%12:vr = COPY %11
%10:vr = COPY %12
%13:gpr = LUI 1
18 changes: 10 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir
@@ -200,13 +200,15 @@ body: |
; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: dead $x0 = PseudoVSETVLI [[COPY]], 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: [[PseudoVLE32_V_MF2_:%[0-9]+]]:vr = PseudoVLE32_V_MF2 [[COPY1]], $noreg, 5 /* e32 */, implicit $vl, implicit $vtype
; CHECK-NEXT: early-clobber %3:vr = PseudoVZEXT_VF2_M1 killed [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, implicit $vl, implicit $vtype
; CHECK-NEXT: %dead:vr = IMPLICIT_DEF
; CHECK-NEXT: early-clobber %3:vr = PseudoVZEXT_VF2_M1 %dead, killed [[PseudoVLE32_V_MF2_]], $noreg, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: $v8 = COPY %3
; CHECK-NEXT: PseudoRET implicit $v8
%1:gprnox0 = COPY $x11
%0:gpr = COPY $x10
%2:vr = PseudoVLE32_V_MF2 %0, %1, 5
early-clobber %3:vr = PseudoVZEXT_VF2_M1 killed %2, %1, 6
%dead:vr = IMPLICIT_DEF
early-clobber %3:vr = PseudoVZEXT_VF2_M1 %dead, killed %2, %1, 6, 0
$v8 = COPY %3
PseudoRET implicit $v8
@@ -307,8 +309,8 @@ body: |
; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x10
; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: [[PseudoVLE64_V_M1_:%[0-9]+]]:vr = PseudoVLE64_V_M1 [[COPY]], 2, 6 /* e64 */, implicit $vl, implicit $vtype :: (load (s128) from %ir.x)
; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 $x0, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, -1, 6 /* e64 */, implicit $vl, implicit $vtype
; CHECK-NEXT: dead [[PseudoVSETVLIX0_:%[0-9]+]]:gpr = PseudoVSETVLIX0 $x0, 152 /* e64, m1, tu, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 undef $v2, 0, -1, 6 /* e64 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 216 /* e64, m1, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_E8_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1_E8 [[DEF]], killed [[PseudoVLE64_V_M1_]], killed [[PseudoVMV_V_I_M1_]], 2, 6 /* e64 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
Expand All @@ -317,7 +319,7 @@ body: |
; CHECK-NEXT: PseudoRET implicit $x10
%0:gpr = COPY $x10
%1:vr = PseudoVLE64_V_M1 %0, 2, 6 :: (load (s128) from %ir.x)
%2:vr = PseudoVMV_V_I_M1 0, -1, 6
%2:vr = PseudoVMV_V_I_M1 undef $v2, 0, -1, 6, 0
%4:vr = IMPLICIT_DEF
%3:vr = PseudoVREDSUM_VS_M1_E8 %4, killed %1, killed %2, 2, 6, 1
%5:gpr = PseudoVMV_X_S_M1 killed %3, 6
@@ -422,11 +424,11 @@ body: |
; CHECK-NEXT: %pt:vrm2 = IMPLICIT_DEF
; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 4, 217 /* e64, m2, ta, ma */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: [[PseudoVID_V_M2_:%[0-9]+]]:vrm2 = PseudoVID_V_M2 %pt, 4, 6 /* e64 */, 3 /* ta, ma */, implicit $vl, implicit $vtype
; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 198 /* e8, mf4, ta, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
; CHECK-NEXT: [[PseudoVMV_V_I_MF4_:%[0-9]+]]:vr = PseudoVMV_V_I_MF4 0, 4, 3 /* e8 */, implicit $vl, implicit $vtype
; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 134 /* e8, mf4, tu, ma */, implicit-def $vl, implicit-def $vtype, implicit $vl
; CHECK-NEXT: [[PseudoVMV_V_I_MF4_:%[0-9]+]]:vr = PseudoVMV_V_I_MF4 undef [[PseudoVMV_V_I_MF4_]], 0, 4, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: PseudoRET
%pt:vrm2 = IMPLICIT_DEF
%0:vrm2 = PseudoVID_V_M2 %pt, 4, 6, 3
%4:vr = PseudoVMV_V_I_MF4 0, 4, 3
%4:vr = PseudoVMV_V_I_MF4 undef %4, 0, 4, 3, 0
PseudoRET
...
@@ -169,7 +169,7 @@ body: |
; CHECK-NEXT: $x10 = ADD $x2, killed $x10
; CHECK-NEXT: SD killed renamable $x16, killed $x10, 64 :: (store (s64) into %fixed-stack.1, align 16)
; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 2, 69 /* e8, mf8, ta, mu */, implicit-def $vl, implicit-def $vtype
; CHECK-NEXT: renamable $v8 = PseudoVMV_V_I_MF8 0, 2, 3 /* e8 */, implicit $vl, implicit $vtype
; CHECK-NEXT: renamable $v8 = PseudoVMV_V_I_MF8 undef $v8, 0, 2, 3 /* e8 */, 0 /* tu, mu */, implicit $vl, implicit $vtype
; CHECK-NEXT: $x10 = ADDI $x2, 32
; CHECK-NEXT: VS1R_V killed renamable $v8, killed $x10 :: (store unknown-size into %stack.1, align 8)
; CHECK-NEXT: {{ $}}
Expand Down Expand Up @@ -200,7 +200,7 @@ body: |
SD killed renamable $x17, %fixed-stack.0, 0 :: (store (s64))
SD killed renamable $x16, %fixed-stack.1, 0 :: (store (s64) into %fixed-stack.1, align 16)
dead $x0 = PseudoVSETIVLI 2, 69, implicit-def $vl, implicit-def $vtype
renamable $v8 = PseudoVMV_V_I_MF8 0, 2, 3, implicit $vl, implicit $vtype
renamable $v8 = PseudoVMV_V_I_MF8 undef $v8, 0, 2, 3, 0, implicit $vl, implicit $vtype
VS1R_V killed renamable $v8, %stack.1 :: (store unknown-size into %stack.1, align 8)
bb.1.while.cond: