152 changes: 76 additions & 76 deletions llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll

Large diffs are not rendered by default.

152 changes: 76 additions & 76 deletions llvm/test/CodeGen/RISCV/rvv/cttz-sdnode.ll

Large diffs are not rendered by default.

90 changes: 45 additions & 45 deletions llvm/test/CodeGen/RISCV/rvv/extload-truncstore.ll
@@ -410,7 +410,7 @@ define void @truncstore_nxv1i16_nxv1i8(<vscale x 1 x i16> %x, <vscale x 1 x i8>*
; CHECK-LABEL: truncstore_nxv1i16_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 1 x i16> %x to <vscale x 1 x i8>
@@ -474,7 +474,7 @@ define void @truncstore_nxv2i16_nxv2i8(<vscale x 2 x i16> %x, <vscale x 2 x i8>*
; CHECK-LABEL: truncstore_nxv2i16_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 2 x i16> %x to <vscale x 2 x i8>
@@ -538,7 +538,7 @@ define void @truncstore_nxv4i16_nxv4i8(<vscale x 4 x i16> %x, <vscale x 4 x i8>*
; CHECK-LABEL: truncstore_nxv4i16_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 4 x i16> %x to <vscale x 4 x i8>
@@ -598,7 +598,7 @@ define void @truncstore_nxv8i16_nxv8i8(<vscale x 8 x i16> %x, <vscale x 8 x i8>*
; CHECK-LABEL: truncstore_nxv8i16_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
-; CHECK-NEXT: vnsrl.wi v10, v8, 0
+; CHECK-NEXT: vnsrl.wx v10, v8, zero
; CHECK-NEXT: vs1r.v v10, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 8 x i16> %x to <vscale x 8 x i8>
@@ -658,7 +658,7 @@ define void @truncstore_nxv16i16_nxv16i8(<vscale x 16 x i16> %x, <vscale x 16 x
; CHECK-LABEL: truncstore_nxv16i16_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
-; CHECK-NEXT: vnsrl.wi v12, v8, 0
+; CHECK-NEXT: vnsrl.wx v12, v8, zero
; CHECK-NEXT: vs2r.v v12, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 16 x i16> %x to <vscale x 16 x i8>
@@ -694,7 +694,7 @@ define void @truncstore_nxv32i16_nxv32i8(<vscale x 32 x i16> %x, <vscale x 32 x
; CHECK-LABEL: truncstore_nxv32i16_nxv32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
-; CHECK-NEXT: vnsrl.wi v16, v8, 0
+; CHECK-NEXT: vnsrl.wx v16, v8, zero
; CHECK-NEXT: vs4r.v v16, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 32 x i16> %x to <vscale x 32 x i8>
Expand All @@ -706,9 +706,9 @@ define void @truncstore_nxv1i32_nxv1i8(<vscale x 1 x i32> %x, <vscale x 1 x i8>*
; CHECK-LABEL: truncstore_nxv1i32_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 1 x i32> %x to <vscale x 1 x i8>
Expand All @@ -720,7 +720,7 @@ define void @truncstore_nxv1i32_nxv1i16(<vscale x 1 x i32> %x, <vscale x 1 x i16
; CHECK-LABEL: truncstore_nxv1i32_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 1 x i32> %x to <vscale x 1 x i16>
@@ -758,9 +758,9 @@ define void @truncstore_nxv2i32_nxv2i8(<vscale x 2 x i32> %x, <vscale x 2 x i8>*
; CHECK-LABEL: truncstore_nxv2i32_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 2 x i32> %x to <vscale x 2 x i8>
Expand All @@ -772,7 +772,7 @@ define void @truncstore_nxv2i32_nxv2i16(<vscale x 2 x i32> %x, <vscale x 2 x i16
; CHECK-LABEL: truncstore_nxv2i32_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 2 x i32> %x to <vscale x 2 x i16>
@@ -808,9 +808,9 @@ define void @truncstore_nxv4i32_nxv4i8(<vscale x 4 x i32> %x, <vscale x 4 x i8>*
; CHECK-LABEL: truncstore_nxv4i32_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
-; CHECK-NEXT: vnsrl.wi v10, v8, 0
+; CHECK-NEXT: vnsrl.wx v10, v8, zero
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: vnsrl.wx v8, v10, zero
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 4 x i32> %x to <vscale x 4 x i8>
Expand All @@ -822,7 +822,7 @@ define void @truncstore_nxv4i32_nxv4i16(<vscale x 4 x i32> %x, <vscale x 4 x i16
; CHECK-LABEL: truncstore_nxv4i32_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
-; CHECK-NEXT: vnsrl.wi v10, v8, 0
+; CHECK-NEXT: vnsrl.wx v10, v8, zero
; CHECK-NEXT: vs1r.v v10, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 4 x i32> %x to <vscale x 4 x i16>
@@ -858,9 +858,9 @@ define void @truncstore_nxv8i32_nxv8i8(<vscale x 8 x i32> %x, <vscale x 8 x i8>*
; CHECK-LABEL: truncstore_nxv8i32_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
-; CHECK-NEXT: vnsrl.wi v12, v8, 0
+; CHECK-NEXT: vnsrl.wx v12, v8, zero
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: vnsrl.wx v8, v12, zero
; CHECK-NEXT: vs1r.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 8 x i32> %x to <vscale x 8 x i8>
Expand All @@ -872,7 +872,7 @@ define void @truncstore_nxv8i32_nxv8i16(<vscale x 8 x i32> %x, <vscale x 8 x i16
; CHECK-LABEL: truncstore_nxv8i32_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
-; CHECK-NEXT: vnsrl.wi v12, v8, 0
+; CHECK-NEXT: vnsrl.wx v12, v8, zero
; CHECK-NEXT: vs2r.v v12, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 8 x i32> %x to <vscale x 8 x i16>
@@ -908,9 +908,9 @@ define void @truncstore_nxv16i32_nxv16i8(<vscale x 16 x i32> %x, <vscale x 16 x
; CHECK-LABEL: truncstore_nxv16i32_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
-; CHECK-NEXT: vnsrl.wi v16, v8, 0
+; CHECK-NEXT: vnsrl.wx v16, v8, zero
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v16, 0
+; CHECK-NEXT: vnsrl.wx v8, v16, zero
; CHECK-NEXT: vs2r.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 16 x i32> %x to <vscale x 16 x i8>
Expand All @@ -922,7 +922,7 @@ define void @truncstore_nxv16i32_nxv16i16(<vscale x 16 x i32> %x, <vscale x 16 x
; CHECK-LABEL: truncstore_nxv16i32_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
-; CHECK-NEXT: vnsrl.wi v16, v8, 0
+; CHECK-NEXT: vnsrl.wx v16, v8, zero
; CHECK-NEXT: vs4r.v v16, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 16 x i32> %x to <vscale x 16 x i16>
Expand All @@ -934,11 +934,11 @@ define void @truncstore_nxv1i64_nxv1i8(<vscale x 1 x i64> %x, <vscale x 1 x i8>*
; CHECK-LABEL: truncstore_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i8>
Expand All @@ -950,9 +950,9 @@ define void @truncstore_nxv1i64_nxv1i16(<vscale x 1 x i64> %x, <vscale x 1 x i16
; CHECK-LABEL: truncstore_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i16>
Expand All @@ -964,7 +964,7 @@ define void @truncstore_nxv1i64_nxv1i32(<vscale x 1 x i64> %x, <vscale x 1 x i32
; CHECK-LABEL: truncstore_nxv1i64_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 1 x i64> %x to <vscale x 1 x i32>
Expand All @@ -976,11 +976,11 @@ define void @truncstore_nxv2i64_nxv2i8(<vscale x 2 x i64> %x, <vscale x 2 x i8>*
; CHECK-LABEL: truncstore_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
-; CHECK-NEXT: vnsrl.wi v10, v8, 0
+; CHECK-NEXT: vnsrl.wx v10, v8, zero
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: vnsrl.wx v8, v10, zero
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i8>
Expand All @@ -992,9 +992,9 @@ define void @truncstore_nxv2i64_nxv2i16(<vscale x 2 x i64> %x, <vscale x 2 x i16
; CHECK-LABEL: truncstore_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
-; CHECK-NEXT: vnsrl.wi v10, v8, 0
+; CHECK-NEXT: vnsrl.wx v10, v8, zero
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v10, 0
+; CHECK-NEXT: vnsrl.wx v8, v10, zero
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i16>
Expand All @@ -1006,7 +1006,7 @@ define void @truncstore_nxv2i64_nxv2i32(<vscale x 2 x i64> %x, <vscale x 2 x i32
; CHECK-LABEL: truncstore_nxv2i64_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
-; CHECK-NEXT: vnsrl.wi v10, v8, 0
+; CHECK-NEXT: vnsrl.wx v10, v8, zero
; CHECK-NEXT: vs1r.v v10, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 2 x i64> %x to <vscale x 2 x i32>
Expand All @@ -1018,11 +1018,11 @@ define void @truncstore_nxv4i64_nxv4i8(<vscale x 4 x i64> %x, <vscale x 4 x i8>*
; CHECK-LABEL: truncstore_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT: vnsrl.wi v12, v8, 0
+; CHECK-NEXT: vnsrl.wx v12, v8, zero
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: vnsrl.wx v8, v12, zero
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i8>
Expand All @@ -1034,9 +1034,9 @@ define void @truncstore_nxv4i64_nxv4i16(<vscale x 4 x i64> %x, <vscale x 4 x i16
; CHECK-LABEL: truncstore_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT: vnsrl.wi v12, v8, 0
+; CHECK-NEXT: vnsrl.wx v12, v8, zero
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v12, 0
+; CHECK-NEXT: vnsrl.wx v8, v12, zero
; CHECK-NEXT: vs1r.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i16>
Expand All @@ -1048,7 +1048,7 @@ define void @truncstore_nxv4i64_nxv4i32(<vscale x 4 x i64> %x, <vscale x 4 x i32
; CHECK-LABEL: truncstore_nxv4i64_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; CHECK-NEXT: vnsrl.wi v12, v8, 0
+; CHECK-NEXT: vnsrl.wx v12, v8, zero
; CHECK-NEXT: vs2r.v v12, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 4 x i64> %x to <vscale x 4 x i32>
Expand All @@ -1060,11 +1060,11 @@ define void @truncstore_nxv8i64_nxv8i8(<vscale x 8 x i64> %x, <vscale x 8 x i8>*
; CHECK-LABEL: truncstore_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT: vnsrl.wi v16, v8, 0
+; CHECK-NEXT: vnsrl.wx v16, v8, zero
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v16, 0
+; CHECK-NEXT: vnsrl.wx v8, v16, zero
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vnsrl.wi v10, v8, 0
+; CHECK-NEXT: vnsrl.wx v10, v8, zero
; CHECK-NEXT: vs1r.v v10, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i8>
Expand All @@ -1076,9 +1076,9 @@ define void @truncstore_nxv8i64_nxv8i16(<vscale x 8 x i64> %x, <vscale x 8 x i16
; CHECK-LABEL: truncstore_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT: vnsrl.wi v16, v8, 0
+; CHECK-NEXT: vnsrl.wx v16, v8, zero
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v16, 0
+; CHECK-NEXT: vnsrl.wx v8, v16, zero
; CHECK-NEXT: vs2r.v v8, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i16>
Expand All @@ -1090,7 +1090,7 @@ define void @truncstore_nxv8i64_nxv8i32(<vscale x 8 x i64> %x, <vscale x 8 x i32
; CHECK-LABEL: truncstore_nxv8i64_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
-; CHECK-NEXT: vnsrl.wi v16, v8, 0
+; CHECK-NEXT: vnsrl.wx v16, v8, zero
; CHECK-NEXT: vs4r.v v16, (a0)
; CHECK-NEXT: ret
%y = trunc <vscale x 8 x i64> %x to <vscale x 8 x i32>
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll
@@ -125,7 +125,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
; LMULMAX8-RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; LMULMAX8-RV32-NEXT: vnsrl.wi v10, v12, 23
; LMULMAX8-RV32-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; LMULMAX8-RV32-NEXT: vnsrl.wi v9, v10, 0
+; LMULMAX8-RV32-NEXT: vnsrl.wx v9, v10, zero
; LMULMAX8-RV32-NEXT: li a1, 134
; LMULMAX8-RV32-NEXT: vmseq.vi v0, v8, 0
; LMULMAX8-RV32-NEXT: vrsub.vx v8, v9, a1
Expand All @@ -143,7 +143,7 @@ define void @ctlz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
; LMULMAX8-RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; LMULMAX8-RV64-NEXT: vnsrl.wi v10, v12, 23
; LMULMAX8-RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; LMULMAX8-RV64-NEXT: vnsrl.wi v9, v10, 0
+; LMULMAX8-RV64-NEXT: vnsrl.wx v9, v10, zero
; LMULMAX8-RV64-NEXT: li a1, 134
; LMULMAX8-RV64-NEXT: vmseq.vi v0, v8, 0
; LMULMAX8-RV64-NEXT: vrsub.vx v8, v9, a1
@@ -1045,7 +1045,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
; LMULMAX8-RV32-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; LMULMAX8-RV32-NEXT: vnsrl.wi v12, v16, 23
; LMULMAX8-RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; LMULMAX8-RV32-NEXT: vnsrl.wi v10, v12, 0
+; LMULMAX8-RV32-NEXT: vnsrl.wx v10, v12, zero
; LMULMAX8-RV32-NEXT: li a1, 134
; LMULMAX8-RV32-NEXT: vmseq.vi v0, v8, 0
; LMULMAX8-RV32-NEXT: vrsub.vx v8, v10, a1
Expand All @@ -1064,7 +1064,7 @@ define void @ctlz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
; LMULMAX8-RV64-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; LMULMAX8-RV64-NEXT: vnsrl.wi v12, v16, 23
; LMULMAX8-RV64-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; LMULMAX8-RV64-NEXT: vnsrl.wi v10, v12, 0
+; LMULMAX8-RV64-NEXT: vnsrl.wx v10, v12, zero
; LMULMAX8-RV64-NEXT: li a1, 134
; LMULMAX8-RV64-NEXT: vmseq.vi v0, v8, 0
; LMULMAX8-RV64-NEXT: vrsub.vx v8, v10, a1
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll
@@ -115,7 +115,7 @@ define void @cttz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
; LMULMAX8-RV32-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; LMULMAX8-RV32-NEXT: vnsrl.wi v10, v12, 23
; LMULMAX8-RV32-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; LMULMAX8-RV32-NEXT: vnsrl.wi v9, v10, 0
+; LMULMAX8-RV32-NEXT: vnsrl.wx v9, v10, zero
; LMULMAX8-RV32-NEXT: li a1, 127
; LMULMAX8-RV32-NEXT: vmseq.vi v0, v8, 0
; LMULMAX8-RV32-NEXT: vsub.vx v8, v9, a1
Expand All @@ -135,7 +135,7 @@ define void @cttz_v16i8(<16 x i8>* %x, <16 x i8>* %y) nounwind {
; LMULMAX8-RV64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; LMULMAX8-RV64-NEXT: vnsrl.wi v10, v12, 23
; LMULMAX8-RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; LMULMAX8-RV64-NEXT: vnsrl.wi v9, v10, 0
+; LMULMAX8-RV64-NEXT: vnsrl.wx v9, v10, zero
; LMULMAX8-RV64-NEXT: li a1, 127
; LMULMAX8-RV64-NEXT: vmseq.vi v0, v8, 0
; LMULMAX8-RV64-NEXT: vsub.vx v8, v9, a1
@@ -927,7 +927,7 @@ define void @cttz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
; LMULMAX8-RV32-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; LMULMAX8-RV32-NEXT: vnsrl.wi v12, v16, 23
; LMULMAX8-RV32-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; LMULMAX8-RV32-NEXT: vnsrl.wi v10, v12, 0
+; LMULMAX8-RV32-NEXT: vnsrl.wx v10, v12, zero
; LMULMAX8-RV32-NEXT: li a1, 127
; LMULMAX8-RV32-NEXT: vmseq.vi v0, v8, 0
; LMULMAX8-RV32-NEXT: vsub.vx v8, v10, a1
Expand All @@ -948,7 +948,7 @@ define void @cttz_v32i8(<32 x i8>* %x, <32 x i8>* %y) nounwind {
; LMULMAX8-RV64-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; LMULMAX8-RV64-NEXT: vnsrl.wi v12, v16, 23
; LMULMAX8-RV64-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; LMULMAX8-RV64-NEXT: vnsrl.wi v10, v12, 0
+; LMULMAX8-RV64-NEXT: vnsrl.wx v10, v12, zero
; LMULMAX8-RV64-NEXT: li a1, 127
; LMULMAX8-RV64-NEXT: vmseq.vi v0, v8, 0
; LMULMAX8-RV64-NEXT: vsub.vx v8, v10, a1
300 changes: 150 additions & 150 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll

Large diffs are not rendered by default.

48 changes: 24 additions & 24 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll
@@ -348,9 +348,9 @@ define void @fp2si_v2f64_v2i8(<2 x double>* %x, <2 x i8>* %y) {
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: vnsrl.wx v8, v9, zero
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vse8.v v8, (a1)
; CHECK-NEXT: ret
%a = load <2 x double>, <2 x double>* %x
Expand All @@ -367,9 +367,9 @@ define void @fp2ui_v2f64_v2i8(<2 x double>* %x, <2 x i8>* %y) {
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v9, 0
+; CHECK-NEXT: vnsrl.wx v8, v9, zero
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vse8.v v8, (a1)
; CHECK-NEXT: ret
%a = load <2 x double>, <2 x double>* %x
@@ -410,9 +410,9 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
; LMULMAX8-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; LMULMAX8-NEXT: vfncvt.rtz.x.f.w v12, v8
; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX8-NEXT: vnsrl.wi v8, v12, 0
+; LMULMAX8-NEXT: vnsrl.wx v8, v12, zero
; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX8-NEXT: vnsrl.wi v8, v8, 0
+; LMULMAX8-NEXT: vnsrl.wx v8, v8, zero
; LMULMAX8-NEXT: vse8.v v8, (a1)
; LMULMAX8-NEXT: ret
;
Expand All @@ -429,31 +429,31 @@ define void @fp2si_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v12, v10
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wi v10, v12, 0
+; LMULMAX1-NEXT: vnsrl.wx v10, v12, zero
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0
+; LMULMAX1-NEXT: vnsrl.wx v10, v10, zero
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v12, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wi v11, v12, 0
+; LMULMAX1-NEXT: vnsrl.wx v11, v12, zero
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wi v11, v11, 0
+; LMULMAX1-NEXT: vnsrl.wx v11, v11, zero
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v11, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v11, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0
+; LMULMAX1-NEXT: vnsrl.wx v9, v11, zero
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.x.f.w v9, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wi v8, v9, 0
+; LMULMAX1-NEXT: vnsrl.wx v8, v9, zero
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v8, 6
; LMULMAX1-NEXT: vse8.v v10, (a1)
Expand All @@ -472,9 +472,9 @@ define void @fp2ui_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
; LMULMAX8-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; LMULMAX8-NEXT: vfncvt.rtz.xu.f.w v12, v8
; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX8-NEXT: vnsrl.wi v8, v12, 0
+; LMULMAX8-NEXT: vnsrl.wx v8, v12, zero
; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX8-NEXT: vnsrl.wi v8, v8, 0
+; LMULMAX8-NEXT: vnsrl.wx v8, v8, zero
; LMULMAX8-NEXT: vse8.v v8, (a1)
; LMULMAX8-NEXT: ret
;
Expand All @@ -491,31 +491,31 @@ define void @fp2ui_v8f64_v8i8(<8 x double>* %x, <8 x i8>* %y) {
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v12, v10
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wi v10, v12, 0
+; LMULMAX1-NEXT: vnsrl.wx v10, v12, zero
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0
+; LMULMAX1-NEXT: vnsrl.wx v10, v10, zero
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v12, v11
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wi v11, v12, 0
+; LMULMAX1-NEXT: vnsrl.wx v11, v12, zero
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wi v11, v11, 0
+; LMULMAX1-NEXT: vnsrl.wx v11, v11, zero
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v11, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v11, v9
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0
+; LMULMAX1-NEXT: vnsrl.wx v9, v11, zero
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; LMULMAX1-NEXT: vfncvt.rtz.xu.f.w v9, v8
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wi v8, v9, 0
+; LMULMAX1-NEXT: vnsrl.wx v8, v9, zero
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v10, v8, 6
; LMULMAX1-NEXT: vse8.v v10, (a1)
20 changes: 10 additions & 10 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
@@ -169,9 +169,9 @@ define void @trunc_v4i8_v4i32(<4 x i32>* %x, <4 x i8>* %z) {
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wi v8, v8, 0
+; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vse8.v v8, (a1)
; CHECK-NEXT: ret
%a = load <4 x i32>, <4 x i32>* %x
Expand All @@ -186,9 +186,9 @@ define void @trunc_v8i8_v8i32(<8 x i32>* %x, <8 x i8>* %z) {
; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX8-NEXT: vle32.v v8, (a0)
; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX8-NEXT: vnsrl.wi v10, v8, 0
+; LMULMAX8-NEXT: vnsrl.wx v10, v8, zero
; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX8-NEXT: vnsrl.wi v8, v10, 0
+; LMULMAX8-NEXT: vnsrl.wx v8, v10, zero
; LMULMAX8-NEXT: vse8.v v8, (a1)
; LMULMAX8-NEXT: ret
;
Expand All @@ -197,9 +197,9 @@ define void @trunc_v8i8_v8i32(<8 x i32>* %x, <8 x i8>* %z) {
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-NEXT: vle32.v v8, (a0)
; LMULMAX2-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; LMULMAX2-NEXT: vnsrl.wi v10, v8, 0
+; LMULMAX2-NEXT: vnsrl.wx v10, v8, zero
; LMULMAX2-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; LMULMAX2-NEXT: vnsrl.wi v8, v10, 0
+; LMULMAX2-NEXT: vnsrl.wx v8, v10, zero
; LMULMAX2-NEXT: vse8.v v8, (a1)
; LMULMAX2-NEXT: ret
;
Expand All @@ -210,13 +210,13 @@ define void @trunc_v8i8_v8i32(<8 x i32>* %x, <8 x i8>* %z) {
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vle32.v v9, (a0)
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
+; LMULMAX1-NEXT: vnsrl.wx v8, v8, zero
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
+; LMULMAX1-NEXT: vnsrl.wx v9, v9, zero
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vse8.v v8, (a1)
28 changes: 14 additions & 14 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -1037,7 +1037,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <8
; RV32-NEXT: vsext.vf8 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v8, 0
+; RV32-NEXT: vnsrl.wx v16, v8, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
Expand All @@ -1064,7 +1064,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <8
; RV32-NEXT: vzext.vf8 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v8, 0
+; RV32-NEXT: vnsrl.wx v16, v8, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1115,7 +1115,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i16_v8i64(i64* %base, <8 x i16> %idxs,
; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v8, 0
+; RV32-NEXT: vnsrl.wx v16, v8, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
Expand All @@ -1142,7 +1142,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i16_v8i64(i64* %base, <8 x i16> %idxs,
; RV32-NEXT: vzext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v8, 0
+; RV32-NEXT: vnsrl.wx v16, v8, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1192,7 +1192,7 @@ define <8 x i64> @mgather_baseidx_sext_v8i32_v8i64(i64* %base, <8 x i32> %idxs,
; RV32-NEXT: vsext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v8, 0
+; RV32-NEXT: vnsrl.wx v16, v8, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
Expand All @@ -1219,7 +1219,7 @@ define <8 x i64> @mgather_baseidx_zext_v8i32_v8i64(i64* %base, <8 x i32> %idxs,
; RV32-NEXT: vzext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v8, 0
+; RV32-NEXT: vnsrl.wx v16, v8, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
Expand All @@ -1245,7 +1245,7 @@ define <8 x i64> @mgather_baseidx_v8i64(i64* %base, <8 x i64> %idxs, <8 x i1> %m
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v8, 0
+; RV32-NEXT: vnsrl.wx v16, v8, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1907,7 +1907,7 @@ define <8 x double> @mgather_baseidx_sext_v8i8_v8f64(double* %base, <8 x i8> %id
; RV32-NEXT: vsext.vf8 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v8, 0
+; RV32-NEXT: vnsrl.wx v16, v8, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
Expand All @@ -1934,7 +1934,7 @@ define <8 x double> @mgather_baseidx_zext_v8i8_v8f64(double* %base, <8 x i8> %id
; RV32-NEXT: vzext.vf8 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v8, 0
+; RV32-NEXT: vnsrl.wx v16, v8, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -1985,7 +1985,7 @@ define <8 x double> @mgather_baseidx_sext_v8i16_v8f64(double* %base, <8 x i16> %
; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v8, 0
+; RV32-NEXT: vnsrl.wx v16, v8, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
Expand All @@ -2012,7 +2012,7 @@ define <8 x double> @mgather_baseidx_zext_v8i16_v8f64(double* %base, <8 x i16> %
; RV32-NEXT: vzext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v8, 0
+; RV32-NEXT: vnsrl.wx v16, v8, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
@@ -2062,7 +2062,7 @@ define <8 x double> @mgather_baseidx_sext_v8i32_v8f64(double* %base, <8 x i32> %
; RV32-NEXT: vsext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v8, 0
+; RV32-NEXT: vnsrl.wx v16, v8, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
Expand All @@ -2089,7 +2089,7 @@ define <8 x double> @mgather_baseidx_zext_v8i32_v8f64(double* %base, <8 x i32> %
; RV32-NEXT: vzext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v8, 0
+; RV32-NEXT: vnsrl.wx v16, v8, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
Expand All @@ -2115,7 +2115,7 @@ define <8 x double> @mgather_baseidx_v8f64(double* %base, <8 x i64> %idxs, <8 x
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v8, 0
+; RV32-NEXT: vnsrl.wx v16, v8, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v12, (a0), v16, v0.t
; RV32-NEXT: vmv.v.v v8, v12
68 changes: 34 additions & 34 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -44,14 +44,14 @@ define void @mscatter_v2i16_truncstore_v2i8(<2 x i16> %val, <2 x i8*> %ptrs, <2
; RV32-LABEL: mscatter_v2i16_truncstore_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
-; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_v2i16_truncstore_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
%tval = trunc <2 x i16> %val to <2 x i8>
Expand All @@ -63,18 +63,18 @@ define void @mscatter_v2i32_truncstore_v2i8(<2 x i32> %val, <2 x i8*> %ptrs, <2
; RV32-LABEL: mscatter_v2i32_truncstore_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_v2i32_truncstore_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wi v8, v8, 0
+; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV64-NEXT: vnsrl.wi v8, v8, 0
+; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
%tval = trunc <2 x i32> %val to <2 x i8>
Expand All @@ -86,22 +86,22 @@ define void @mscatter_v2i64_truncstore_v2i8(<2 x i64> %val, <2 x i8*> %ptrs, <2
; RV32-LABEL: mscatter_v2i64_truncstore_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_v2i64_truncstore_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT: vnsrl.wi v8, v8, 0
+; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wi v8, v8, 0
+; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV64-NEXT: vnsrl.wi v8, v8, 0
+; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
%tval = trunc <2 x i64> %val to <2 x i8>
@@ -236,14 +236,14 @@ define void @mscatter_v2i32_truncstore_v2i16(<2 x i32> %val, <2 x i16*> %ptrs, <
; RV32-LABEL: mscatter_v2i32_truncstore_v2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_v2i32_truncstore_v2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wi v8, v8, 0
+; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
%tval = trunc <2 x i32> %val to <2 x i16>
Expand All @@ -255,18 +255,18 @@ define void @mscatter_v2i64_truncstore_v2i16(<2 x i64> %val, <2 x i16*> %ptrs, <
; RV32-LABEL: mscatter_v2i64_truncstore_v2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_v2i64_truncstore_v2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT: vnsrl.wi v8, v8, 0
+; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wi v8, v8, 0
+; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
%tval = trunc <2 x i64> %val to <2 x i16>
@@ -474,14 +474,14 @@ define void @mscatter_v2i64_truncstore_v2i32(<2 x i64> %val, <2 x i32*> %ptrs, <
; RV32-LABEL: mscatter_v2i64_truncstore_v2i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_v2i64_truncstore_v2i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT: vnsrl.wi v8, v8, 0
+; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
%tval = trunc <2 x i64> %val to <2 x i32>
@@ -843,7 +843,7 @@ define void @mscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i
; RV32-NEXT: vsext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v12, 0
+; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
Expand All @@ -868,7 +868,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x i
; RV32-NEXT: vzext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v12, 0
+; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -915,7 +915,7 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vsext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v12, 0
+; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
Expand All @@ -940,7 +940,7 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vzext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v12, 0
+; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -986,7 +986,7 @@ define void @mscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vsext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v12, 0
+; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
Expand All @@ -1011,7 +1011,7 @@ define void @mscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vzext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v12, 0
+; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
Expand All @@ -1035,7 +1035,7 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, i64* %base, <8 x i64> %idxs,
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v12, v12, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v12, 0
+; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1637,7 +1637,7 @@ define void @mscatter_baseidx_sext_v8i8_v8f64(<8 x double> %val, double* %base,
; RV32-NEXT: vsext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v12, 0
+; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
Expand All @@ -1662,7 +1662,7 @@ define void @mscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, double* %base,
; RV32-NEXT: vzext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v12, 0
+; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1709,7 +1709,7 @@ define void @mscatter_baseidx_sext_v8i16_v8f64(<8 x double> %val, double* %base,
; RV32-NEXT: vsext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v12, 0
+; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
Expand All @@ -1734,7 +1734,7 @@ define void @mscatter_baseidx_zext_v8i16_v8f64(<8 x double> %val, double* %base,
; RV32-NEXT: vzext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v12, 0
+; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1780,7 +1780,7 @@ define void @mscatter_baseidx_sext_v8i32_v8f64(<8 x double> %val, double* %base,
; RV32-NEXT: vsext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v12, 0
+; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
Expand All @@ -1805,7 +1805,7 @@ define void @mscatter_baseidx_zext_v8i32_v8f64(<8 x double> %val, double* %base,
; RV32-NEXT: vzext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v12, 0
+; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
Expand All @@ -1829,7 +1829,7 @@ define void @mscatter_baseidx_v8f64(<8 x double> %val, double* %base, <8 x i64>
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v12, v12, 3
; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v16, v12, 0
+; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
56 changes: 28 additions & 28 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -965,7 +965,7 @@ define <8 x i64> @vpgather_baseidx_sext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <
; RV32-NEXT: vsext.vf8 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v12, v8, 0
+; RV32-NEXT: vnsrl.wx v12, v8, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
Expand All @@ -991,7 +991,7 @@ define <8 x i64> @vpgather_baseidx_zext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <
; RV32-NEXT: vzext.vf8 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v12, v8, 0
+; RV32-NEXT: vnsrl.wx v12, v8, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1040,7 +1040,7 @@ define <8 x i64> @vpgather_baseidx_sext_v8i16_v8i64(i64* %base, <8 x i16> %idxs,
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v12, v8, 0
+; RV32-NEXT: vnsrl.wx v12, v8, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
Expand All @@ -1066,7 +1066,7 @@ define <8 x i64> @vpgather_baseidx_zext_v8i16_v8i64(i64* %base, <8 x i16> %idxs,
; RV32-NEXT: vzext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v12, v8, 0
+; RV32-NEXT: vnsrl.wx v12, v8, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1114,7 +1114,7 @@ define <8 x i64> @vpgather_baseidx_sext_v8i32_v8i64(i64* %base, <8 x i32> %idxs,
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v12, v8, 0
+; RV32-NEXT: vnsrl.wx v12, v8, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
Expand All @@ -1140,7 +1140,7 @@ define <8 x i64> @vpgather_baseidx_zext_v8i32_v8i64(i64* %base, <8 x i32> %idxs,
; RV32-NEXT: vzext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v12, v8, 0
+; RV32-NEXT: vnsrl.wx v12, v8, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
Expand All @@ -1165,7 +1165,7 @@ define <8 x i64> @vpgather_baseidx_v8i64(i64* %base, <8 x i64> %idxs, <8 x i1> %
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v12, v8, 0
+; RV32-NEXT: vnsrl.wx v12, v8, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1702,7 +1702,7 @@ define <8 x double> @vpgather_baseidx_sext_v8i8_v8f64(double* %base, <8 x i8> %i
; RV32-NEXT: vsext.vf8 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v12, v8, 0
+; RV32-NEXT: vnsrl.wx v12, v8, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
Expand All @@ -1728,7 +1728,7 @@ define <8 x double> @vpgather_baseidx_zext_v8i8_v8f64(double* %base, <8 x i8> %i
; RV32-NEXT: vzext.vf8 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v12, v8, 0
+; RV32-NEXT: vnsrl.wx v12, v8, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1777,7 +1777,7 @@ define <8 x double> @vpgather_baseidx_sext_v8i16_v8f64(double* %base, <8 x i16>
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v12, v8, 0
+; RV32-NEXT: vnsrl.wx v12, v8, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
Expand All @@ -1803,7 +1803,7 @@ define <8 x double> @vpgather_baseidx_zext_v8i16_v8f64(double* %base, <8 x i16>
; RV32-NEXT: vzext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v12, v8, 0
+; RV32-NEXT: vnsrl.wx v12, v8, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1851,7 +1851,7 @@ define <8 x double> @vpgather_baseidx_sext_v8i32_v8f64(double* %base, <8 x i32>
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v12, v8, 0
+; RV32-NEXT: vnsrl.wx v12, v8, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
Expand All @@ -1877,7 +1877,7 @@ define <8 x double> @vpgather_baseidx_zext_v8i32_v8f64(double* %base, <8 x i32>
; RV32-NEXT: vzext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v12, v8, 0
+; RV32-NEXT: vnsrl.wx v12, v8, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
Expand All @@ -1902,7 +1902,7 @@ define <8 x double> @vpgather_baseidx_v8f64(double* %base, <8 x i64> %idxs, <8 x
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wi v12, v8, 0
+; RV32-NEXT: vnsrl.wx v12, v8, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -2058,7 +2058,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i8_v32f64(double* %base, <32 x i8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli a3, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wi v12, v16, 0
+; RV32-NEXT: vnsrl.wx v12, v16, zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v12, v0.t
; RV32-NEXT: li a2, 16
Expand All @@ -2069,7 +2069,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i8_v32f64(double* %base, <32 x i8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v24, v24, 3
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wi v4, v24, 0
+; RV32-NEXT: vnsrl.wx v4, v24, zero
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v10
; RV32-NEXT: vluxei32.v v8, (a0), v4, v0.t
@@ -2132,7 +2132,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i8_v32f64(double* %base, <32 x i8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli a3, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wi v12, v16, 0
+; RV32-NEXT: vnsrl.wx v12, v16, zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v12, v0.t
; RV32-NEXT: li a2, 16
Expand All @@ -2143,7 +2143,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i8_v32f64(double* %base, <32 x i8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v24, v24, 3
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wi v4, v24, 0
+; RV32-NEXT: vnsrl.wx v4, v24, zero
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v10
; RV32-NEXT: vluxei32.v v8, (a0), v4, v0.t
@@ -2270,7 +2270,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i16_v32f64(double* %base, <32 x i
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli a3, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wi v8, v16, 0
+; RV32-NEXT: vnsrl.wx v8, v16, zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: li a2, 16
Expand All @@ -2281,7 +2281,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i16_v32f64(double* %base, <32 x i
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v24, v24, 3
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wi v4, v24, 0
+; RV32-NEXT: vnsrl.wx v4, v24, zero
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: vluxei32.v v8, (a0), v4, v0.t
@@ -2344,7 +2344,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i16_v32f64(double* %base, <32 x i
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli a3, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wi v8, v16, 0
+; RV32-NEXT: vnsrl.wx v8, v16, zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: li a2, 16
Expand All @@ -2355,7 +2355,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i16_v32f64(double* %base, <32 x i
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v24, v24, 3
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wi v4, v24, 0
+; RV32-NEXT: vnsrl.wx v4, v24, zero
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: vluxei32.v v8, (a0), v4, v0.t
@@ -2481,7 +2481,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i32_v32f64(double* %base, <32 x i
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli a3, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wi v4, v8, 0
+; RV32-NEXT: vnsrl.wx v4, v8, zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v4, v0.t
; RV32-NEXT: li a2, 16
Expand All @@ -2492,7 +2492,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i32_v32f64(double* %base, <32 x i
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wi v24, v8, 0
+; RV32-NEXT: vnsrl.wx v24, v8, zero
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
@@ -2555,7 +2555,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(double* %base, <32 x i
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli a3, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wi v4, v8, 0
+; RV32-NEXT: vnsrl.wx v4, v8, zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v4, v0.t
; RV32-NEXT: li a2, 16
Expand All @@ -2566,7 +2566,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(double* %base, <32 x i
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wi v24, v8, 0
+; RV32-NEXT: vnsrl.wx v24, v8, zero
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
@@ -2624,7 +2624,7 @@ define <32 x double> @vpgather_baseidx_v32f64(double* %base, <32 x i64> %idxs, <
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli a3, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wi v28, v16, 0
+; RV32-NEXT: vnsrl.wx v28, v16, zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: li a2, 16
Expand All @@ -2635,7 +2635,7 @@ define <32 x double> @vpgather_baseidx_v32f64(double* %base, <32 x i64> %idxs, <
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wi v28, v8, 0
+; RV32-NEXT: vnsrl.wx v28, v8, zero
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v24
; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t
76 changes: 38 additions & 38 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
@@ -26,15 +26,15 @@ define void @vpscatter_v2i16_truncstore_v2i8(<2 x i16> %val, <2 x i8*> %ptrs, <2
; RV32-LABEL: vpscatter_v2i16_truncstore_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
-; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_v2i16_truncstore_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
Expand All @@ -47,19 +47,19 @@ define void @vpscatter_v2i32_truncstore_v2i8(<2 x i32> %val, <2 x i8*> %ptrs, <2
; RV32-LABEL: vpscatter_v2i32_truncstore_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_v2i32_truncstore_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
Expand All @@ -72,23 +72,23 @@ define void @vpscatter_v2i64_truncstore_v2i8(<2 x i64> %val, <2 x i8*> %ptrs, <2
; RV32-LABEL: vpscatter_v2i64_truncstore_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT: vnsrl.wi v8, v8, 0
+; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_v2i64_truncstore_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -194,15 +194,15 @@ define void @vpscatter_v2i32_truncstore_v2i16(<2 x i32> %val, <2 x i16*> %ptrs,
; RV32-LABEL: vpscatter_v2i32_truncstore_v2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_v2i32_truncstore_v2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -215,19 +215,19 @@ define void @vpscatter_v2i64_truncstore_v2i16(<2 x i64> %val, <2 x i16*> %ptrs,
; RV32-LABEL: vpscatter_v2i64_truncstore_v2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_v2i64_truncstore_v2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -442,15 +442,15 @@ define void @vpscatter_v2i64_truncstore_v2i32(<2 x i64> %val, <2 x i32*> %ptrs,
; RV32-LABEL: vpscatter_v2i64_truncstore_v2i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_v2i64_truncstore_v2i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -779,7 +779,7 @@ define void @vpscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vsext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -805,7 +805,7 @@ define void @vpscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vzext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -854,7 +854,7 @@ define void @vpscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vsext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -880,7 +880,7 @@ define void @vpscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vzext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -928,7 +928,7 @@ define void @vpscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vsext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -954,7 +954,7 @@ define void @vpscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vzext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -979,7 +979,7 @@ define void @vpscatter_baseidx_v8i64(<8 x i64> %val, i64* %base, <8 x i64> %idxs
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v12, v12, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1500,7 +1500,7 @@ define void @vpscatter_baseidx_sext_v8i8_v8f64(<8 x double> %val, double* %base,
; RV32-NEXT: vsext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1526,7 +1526,7 @@ define void @vpscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, double* %base,
; RV32-NEXT: vzext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1575,7 +1575,7 @@ define void @vpscatter_baseidx_sext_v8i16_v8f64(<8 x double> %val, double* %base
; RV32-NEXT: vsext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1601,7 +1601,7 @@ define void @vpscatter_baseidx_zext_v8i16_v8f64(<8 x double> %val, double* %base
; RV32-NEXT: vzext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1649,7 +1649,7 @@ define void @vpscatter_baseidx_sext_v8i32_v8f64(<8 x double> %val, double* %base
; RV32-NEXT: vsext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1675,7 +1675,7 @@ define void @vpscatter_baseidx_zext_v8i32_v8f64(<8 x double> %val, double* %base
; RV32-NEXT: vzext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1700,7 +1700,7 @@ define void @vpscatter_baseidx_v8f64(<8 x double> %val, double* %base, <8 x i64>
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v12, v12, 3
; RV32-NEXT: vsetvli a2, zero, e32, m2, ta, mu
; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vnsrl.wx v16, v12, zero
; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1932,7 +1932,7 @@ define void @vpscatter_baseidx_sext_v32i32_v32f64(<32 x double> %val, double* %b
; RV32-NEXT: vsext.vf2 v16, v24
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli a4, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vnsrl.wx v24, v8, zero
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; RV32-NEXT: addi a1, a2, -16
; RV32-NEXT: csrr a4, vlenb
@@ -1950,7 +1950,7 @@ define void @vpscatter_baseidx_sext_v32i32_v32f64(<32 x double> %val, double* %b
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vnsrl.wx v16, v8, zero
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 4
@@ -2078,7 +2078,7 @@ define void @vpscatter_baseidx_zext_v32i32_v32f64(<32 x double> %val, double* %b
; RV32-NEXT: vzext.vf2 v16, v24
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli a4, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vnsrl.wx v24, v8, zero
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
; RV32-NEXT: addi a1, a2, -16
; RV32-NEXT: csrr a4, vlenb
@@ -2096,7 +2096,7 @@ define void @vpscatter_baseidx_zext_v32i32_v32f64(<32 x double> %val, double* %b
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vnsrl.wx v16, v8, zero
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 4
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll
@@ -6,9 +6,9 @@ define <vscale x 4 x i5> @trunc_nxv4i32_to_nxv4i5(<vscale x 4 x i32> %a) {
; CHECK-LABEL: trunc_nxv4i32_to_nxv4i5:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vnsrl.wx v10, v8, zero
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: vnsrl.wx v8, v10, zero
; CHECK-NEXT: ret
%v = trunc <vscale x 4 x i32> %a to <vscale x 4 x i5>
ret <vscale x 4 x i5> %v
@@ -18,9 +18,9 @@ define <vscale x 1 x i5> @trunc_nxv1i32_to_nxv1i5(<vscale x 1 x i32> %a) {
; CHECK-LABEL: trunc_nxv1i32_to_nxv1i5:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: ret
%v = trunc <vscale x 1 x i32> %a to <vscale x 1 x i5>
ret <vscale x 1 x i5> %v
28 changes: 14 additions & 14 deletions llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -1043,7 +1043,7 @@ define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i8_nxv8i64(i64* %base, <vsca
; RV32-NEXT: vsext.vf8 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vnsrl.wx v24, v8, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1070,7 +1070,7 @@ define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i8_nxv8i64(i64* %base, <vsca
; RV32-NEXT: vzext.vf8 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vnsrl.wx v24, v8, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1121,7 +1121,7 @@ define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i16_nxv8i64(i64* %base, <vsc
; RV32-NEXT: vsext.vf4 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vnsrl.wx v24, v8, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1148,7 +1148,7 @@ define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i16_nxv8i64(i64* %base, <vsc
; RV32-NEXT: vzext.vf4 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vnsrl.wx v24, v8, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1198,7 +1198,7 @@ define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i32_nxv8i64(i64* %base, <vsc
; RV32-NEXT: vsext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vnsrl.wx v24, v8, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1225,7 +1225,7 @@ define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i32_nxv8i64(i64* %base, <vsc
; RV32-NEXT: vzext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vnsrl.wx v24, v8, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1251,7 +1251,7 @@ define <vscale x 8 x i64> @mgather_baseidx_nxv8i64(i64* %base, <vscale x 8 x i64
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vnsrl.wx v24, v8, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1979,7 +1979,7 @@ define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i8_nxv8f64(double* %base,
; RV32-NEXT: vsext.vf8 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vnsrl.wx v24, v8, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -2006,7 +2006,7 @@ define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i8_nxv8f64(double* %base,
; RV32-NEXT: vzext.vf8 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vnsrl.wx v24, v8, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -2057,7 +2057,7 @@ define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i16_nxv8f64(double* %base
; RV32-NEXT: vsext.vf4 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vnsrl.wx v24, v8, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -2084,7 +2084,7 @@ define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i16_nxv8f64(double* %base
; RV32-NEXT: vzext.vf4 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vnsrl.wx v24, v8, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -2134,7 +2134,7 @@ define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i32_nxv8f64(double* %base
; RV32-NEXT: vsext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vnsrl.wx v24, v8, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -2161,7 +2161,7 @@ define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i32_nxv8f64(double* %base
; RV32-NEXT: vzext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vnsrl.wx v24, v8, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -2187,7 +2187,7 @@ define <vscale x 8 x double> @mgather_baseidx_nxv8f64(double* %base, <vscale x 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vnsrl.wx v24, v8, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
68 changes: 34 additions & 34 deletions llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
@@ -44,14 +44,14 @@ define void @mscatter_nxv2i16_truncstore_nxv2i8(<vscale x 2 x i16> %val, <vscale
; RV32-LABEL: mscatter_nxv2i16_truncstore_nxv2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i16_truncstore_nxv2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i16> %val to <vscale x 2 x i8>
@@ -63,18 +63,18 @@ define void @mscatter_nxv2i32_truncstore_nxv2i8(<vscale x 2 x i32> %val, <vscale
; RV32-LABEL: mscatter_nxv2i32_truncstore_nxv2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i32> %val to <vscale x 2 x i8>
@@ -86,22 +86,22 @@ define void @mscatter_nxv2i64_truncstore_nxv2i8(<vscale x 2 x i64> %val, <vscale
; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV32-NEXT: vnsrl.wi v11, v8, 0
; RV32-NEXT: vnsrl.wx v11, v8, zero
; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; RV32-NEXT: vnsrl.wi v8, v11, 0
; RV32-NEXT: vnsrl.wx v8, v11, zero
; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV64-NEXT: vnsrl.wi v12, v8, 0
; RV64-NEXT: vnsrl.wx v12, v8, zero
; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; RV64-NEXT: vnsrl.wi v8, v12, 0
; RV64-NEXT: vnsrl.wx v8, v12, zero
; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i8>
@@ -236,14 +236,14 @@ define void @mscatter_nxv2i32_truncstore_nxv2i16(<vscale x 2 x i32> %val, <vscal
; RV32-LABEL: mscatter_nxv2i32_truncstore_nxv2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vnsrl.wx v8, v8, zero
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vnsrl.wx v8, v8, zero
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i32> %val to <vscale x 2 x i16>
@@ -255,18 +255,18 @@ define void @mscatter_nxv2i64_truncstore_nxv2i16(<vscale x 2 x i64> %val, <vscal
; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV32-NEXT: vnsrl.wi v11, v8, 0
; RV32-NEXT: vnsrl.wx v11, v8, zero
; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; RV32-NEXT: vnsrl.wi v8, v11, 0
; RV32-NEXT: vnsrl.wx v8, v11, zero
; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV64-NEXT: vnsrl.wi v12, v8, 0
; RV64-NEXT: vnsrl.wx v12, v8, zero
; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; RV64-NEXT: vnsrl.wi v8, v12, 0
; RV64-NEXT: vnsrl.wx v8, v12, zero
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i16>
@@ -474,14 +474,14 @@ define void @mscatter_nxv2i64_truncstore_nxv2i32(<vscale x 2 x i64> %val, <vscal
; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV32-NEXT: vnsrl.wi v11, v8, 0
; RV32-NEXT: vnsrl.wx v11, v8, zero
; RV32-NEXT: vsoxei32.v v11, (zero), v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV64-NEXT: vnsrl.wi v12, v8, 0
; RV64-NEXT: vnsrl.wx v12, v8, zero
; RV64-NEXT: vsoxei64.v v12, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i32>
@@ -843,7 +843,7 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, i64*
; RV32-NEXT: vsext.vf8 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -868,7 +868,7 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, i64*
; RV32-NEXT: vzext.vf8 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -915,7 +915,7 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, i64*
; RV32-NEXT: vsext.vf4 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -940,7 +940,7 @@ define void @mscatter_baseidx_zext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, i64*
; RV32-NEXT: vzext.vf4 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -986,7 +986,7 @@ define void @mscatter_baseidx_sext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, i64*
; RV32-NEXT: vsext.vf2 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1011,7 +1011,7 @@ define void @mscatter_baseidx_zext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, i64*
; RV32-NEXT: vzext.vf2 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1035,7 +1035,7 @@ define void @mscatter_baseidx_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vsca
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1637,7 +1637,7 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, do
; RV32-NEXT: vsext.vf8 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1662,7 +1662,7 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, do
; RV32-NEXT: vzext.vf8 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1709,7 +1709,7 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, d
; RV32-NEXT: vsext.vf4 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1734,7 +1734,7 @@ define void @mscatter_baseidx_zext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, d
; RV32-NEXT: vzext.vf4 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1780,7 +1780,7 @@ define void @mscatter_baseidx_sext_nxv8i32_nxv8f64(<vscale x 8 x double> %val, d
; RV32-NEXT: vsext.vf2 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1805,7 +1805,7 @@ define void @mscatter_baseidx_zext_nxv8i32_nxv8f64(<vscale x 8 x double> %val, d
; RV32-NEXT: vzext.vf2 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1829,7 +1829,7 @@ define void @mscatter_baseidx_nxv8f64(<vscale x 8 x double> %val, double* %base,
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
68 changes: 34 additions & 34 deletions llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
@@ -636,7 +636,7 @@ define <vscale x 1 x i8> @vfptosi_nxv1f32_nxv1i8(<vscale x 1 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: vnsrl.wx v8, v9, zero
; CHECK-NEXT: ret
%evec = fptosi <vscale x 1 x float> %va to <vscale x 1 x i8>
ret <vscale x 1 x i8> %evec
@@ -648,7 +648,7 @@ define <vscale x 1 x i8> @vfptoui_nxv1f32_nxv1i8(<vscale x 1 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: vnsrl.wx v8, v9, zero
; CHECK-NEXT: ret
%evec = fptoui <vscale x 1 x float> %va to <vscale x 1 x i8>
ret <vscale x 1 x i8> %evec
@@ -748,7 +748,7 @@ define <vscale x 2 x i8> @vfptosi_nxv2f32_nxv2i8(<vscale x 2 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: vnsrl.wx v8, v9, zero
; CHECK-NEXT: ret
%evec = fptosi <vscale x 2 x float> %va to <vscale x 2 x i8>
ret <vscale x 2 x i8> %evec
@@ -760,7 +760,7 @@ define <vscale x 2 x i8> @vfptoui_nxv2f32_nxv2i8(<vscale x 2 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: vnsrl.wx v8, v9, zero
; CHECK-NEXT: ret
%evec = fptoui <vscale x 2 x float> %va to <vscale x 2 x i8>
ret <vscale x 2 x i8> %evec
@@ -860,7 +860,7 @@ define <vscale x 4 x i8> @vfptosi_nxv4f32_nxv4i8(<vscale x 4 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: vnsrl.wx v8, v10, zero
; CHECK-NEXT: ret
%evec = fptosi <vscale x 4 x float> %va to <vscale x 4 x i8>
ret <vscale x 4 x i8> %evec
@@ -872,7 +872,7 @@ define <vscale x 4 x i8> @vfptoui_nxv4f32_nxv4i8(<vscale x 4 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: vnsrl.wx v8, v10, zero
; CHECK-NEXT: ret
%evec = fptoui <vscale x 4 x float> %va to <vscale x 4 x i8>
ret <vscale x 4 x i8> %evec
@@ -972,7 +972,7 @@ define <vscale x 8 x i8> @vfptosi_nxv8f32_nxv8i8(<vscale x 8 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: vnsrl.wx v8, v12, zero
; CHECK-NEXT: ret
%evec = fptosi <vscale x 8 x float> %va to <vscale x 8 x i8>
ret <vscale x 8 x i8> %evec
@@ -984,7 +984,7 @@ define <vscale x 8 x i8> @vfptoui_nxv8f32_nxv8i8(<vscale x 8 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: vnsrl.wx v8, v12, zero
; CHECK-NEXT: ret
%evec = fptoui <vscale x 8 x float> %va to <vscale x 8 x i8>
ret <vscale x 8 x i8> %evec
@@ -1084,7 +1084,7 @@ define <vscale x 16 x i8> @vfptosi_nxv16f32_nxv16i8(<vscale x 16 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v16, 0
; CHECK-NEXT: vnsrl.wx v8, v16, zero
; CHECK-NEXT: ret
%evec = fptosi <vscale x 16 x float> %va to <vscale x 16 x i8>
ret <vscale x 16 x i8> %evec
@@ -1096,7 +1096,7 @@ define <vscale x 16 x i8> @vfptoui_nxv16f32_nxv16i8(<vscale x 16 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v16, 0
; CHECK-NEXT: vnsrl.wx v8, v16, zero
; CHECK-NEXT: ret
%evec = fptoui <vscale x 16 x float> %va to <vscale x 16 x i8>
ret <vscale x 16 x i8> %evec
@@ -1174,9 +1174,9 @@ define <vscale x 1 x i8> @vfptosi_nxv1f64_nxv1i8(<vscale x 1 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: vnsrl.wx v8, v9, zero
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: ret
%evec = fptosi <vscale x 1 x double> %va to <vscale x 1 x i8>
ret <vscale x 1 x i8> %evec
@@ -1188,9 +1188,9 @@ define <vscale x 1 x i8> @vfptoui_nxv1f64_nxv1i8(<vscale x 1 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: vnsrl.wx v8, v9, zero
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: ret
%evec = fptoui <vscale x 1 x double> %va to <vscale x 1 x i8>
ret <vscale x 1 x i8> %evec
@@ -1202,7 +1202,7 @@ define <vscale x 1 x i16> @vfptosi_nxv1f64_nxv1i16(<vscale x 1 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: vnsrl.wx v8, v9, zero
; CHECK-NEXT: ret
%evec = fptosi <vscale x 1 x double> %va to <vscale x 1 x i16>
ret <vscale x 1 x i16> %evec
@@ -1214,7 +1214,7 @@ define <vscale x 1 x i16> @vfptoui_nxv1f64_nxv1i16(<vscale x 1 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: vnsrl.wx v8, v9, zero
; CHECK-NEXT: ret
%evec = fptoui <vscale x 1 x double> %va to <vscale x 1 x i16>
ret <vscale x 1 x i16> %evec
@@ -1292,9 +1292,9 @@ define <vscale x 2 x i8> @vfptosi_nxv2f64_nxv2i8(<vscale x 2 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: vnsrl.wx v8, v10, zero
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: ret
%evec = fptosi <vscale x 2 x double> %va to <vscale x 2 x i8>
ret <vscale x 2 x i8> %evec
@@ -1306,9 +1306,9 @@ define <vscale x 2 x i8> @vfptoui_nxv2f64_nxv2i8(<vscale x 2 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: vnsrl.wx v8, v10, zero
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: ret
%evec = fptoui <vscale x 2 x double> %va to <vscale x 2 x i8>
ret <vscale x 2 x i8> %evec
@@ -1320,7 +1320,7 @@ define <vscale x 2 x i16> @vfptosi_nxv2f64_nxv2i16(<vscale x 2 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: vnsrl.wx v8, v10, zero
; CHECK-NEXT: ret
%evec = fptosi <vscale x 2 x double> %va to <vscale x 2 x i16>
ret <vscale x 2 x i16> %evec
@@ -1332,7 +1332,7 @@ define <vscale x 2 x i16> @vfptoui_nxv2f64_nxv2i16(<vscale x 2 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: vnsrl.wx v8, v10, zero
; CHECK-NEXT: ret
%evec = fptoui <vscale x 2 x double> %va to <vscale x 2 x i16>
ret <vscale x 2 x i16> %evec
@@ -1410,9 +1410,9 @@ define <vscale x 4 x i8> @vfptosi_nxv4f64_nxv4i8(<vscale x 4 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: vnsrl.wx v8, v12, zero
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: ret
%evec = fptosi <vscale x 4 x double> %va to <vscale x 4 x i8>
ret <vscale x 4 x i8> %evec
@@ -1424,9 +1424,9 @@ define <vscale x 4 x i8> @vfptoui_nxv4f64_nxv4i8(<vscale x 4 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: vnsrl.wx v8, v12, zero
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: ret
%evec = fptoui <vscale x 4 x double> %va to <vscale x 4 x i8>
ret <vscale x 4 x i8> %evec
@@ -1438,7 +1438,7 @@ define <vscale x 4 x i16> @vfptosi_nxv4f64_nxv4i16(<vscale x 4 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: vnsrl.wx v8, v12, zero
; CHECK-NEXT: ret
%evec = fptosi <vscale x 4 x double> %va to <vscale x 4 x i16>
ret <vscale x 4 x i16> %evec
@@ -1450,7 +1450,7 @@ define <vscale x 4 x i16> @vfptoui_nxv4f64_nxv4i16(<vscale x 4 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: vnsrl.wx v8, v12, zero
; CHECK-NEXT: ret
%evec = fptoui <vscale x 4 x double> %va to <vscale x 4 x i16>
ret <vscale x 4 x i16> %evec
@@ -1528,9 +1528,9 @@ define <vscale x 8 x i8> @vfptosi_nxv8f64_nxv8i8(<vscale x 8 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vnsrl.wi v10, v16, 0
; CHECK-NEXT: vnsrl.wx v10, v16, zero
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: vnsrl.wx v8, v10, zero
; CHECK-NEXT: ret
%evec = fptosi <vscale x 8 x double> %va to <vscale x 8 x i8>
ret <vscale x 8 x i8> %evec
@@ -1542,9 +1542,9 @@ define <vscale x 8 x i8> @vfptoui_nxv8f64_nxv8i8(<vscale x 8 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vnsrl.wi v10, v16, 0
; CHECK-NEXT: vnsrl.wx v10, v16, zero
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: vnsrl.wx v8, v10, zero
; CHECK-NEXT: ret
%evec = fptoui <vscale x 8 x double> %va to <vscale x 8 x i8>
ret <vscale x 8 x i8> %evec
@@ -1556,7 +1556,7 @@ define <vscale x 8 x i16> @vfptosi_nxv8f64_nxv8i16(<vscale x 8 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v16, 0
; CHECK-NEXT: vnsrl.wx v8, v16, zero
; CHECK-NEXT: ret
%evec = fptosi <vscale x 8 x double> %va to <vscale x 8 x i16>
ret <vscale x 8 x i16> %evec
@@ -1568,7 +1568,7 @@ define <vscale x 8 x i16> @vfptoui_nxv8f64_nxv8i16(<vscale x 8 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v16, 0
; CHECK-NEXT: vnsrl.wx v8, v16, zero
; CHECK-NEXT: ret
%evec = fptoui <vscale x 8 x double> %va to <vscale x 8 x i16>
ret <vscale x 8 x i16> %evec
Expand Down