76 changes: 38 additions & 38 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
@@ -26,15 +26,15 @@ define void @vpscatter_v2i16_truncstore_v2i8(<2 x i16> %val, <2 x i8*> %ptrs, <2
; RV32-LABEL: vpscatter_v2i16_truncstore_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
-; RV32-NEXT: vncvt.x.x.w v8, v8
+; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_v2i16_truncstore_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
-; RV64-NEXT: vncvt.x.x.w v8, v8
+; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -47,19 +47,19 @@ define void @vpscatter_v2i32_truncstore_v2i8(<2 x i32> %val, <2 x i8*> %ptrs, <2
; RV32-LABEL: vpscatter_v2i32_truncstore_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v8, v8
+; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT: vncvt.x.x.w v8, v8
+; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_v2i32_truncstore_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV64-NEXT: vncvt.x.x.w v8, v8
+; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV64-NEXT: vncvt.x.x.w v8, v8
+; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -72,23 +72,23 @@ define void @vpscatter_v2i64_truncstore_v2i8(<2 x i64> %val, <2 x i8*> %ptrs, <2
; RV32-LABEL: vpscatter_v2i64_truncstore_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT: vncvt.x.x.w v8, v8
+; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v8, v8
+; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT: vncvt.x.x.w v8, v8
+; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_v2i64_truncstore_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT: vncvt.x.x.w v8, v8
+; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT: vncvt.x.x.w v8, v8
+; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV64-NEXT: vncvt.x.x.w v8, v8
+; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -194,15 +194,15 @@ define void @vpscatter_v2i32_truncstore_v2i16(<2 x i32> %val, <2 x i16*> %ptrs,
; RV32-LABEL: vpscatter_v2i32_truncstore_v2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v8, v8
+; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_v2i32_truncstore_v2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV64-NEXT: vncvt.x.x.w v8, v8
+; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -215,19 +215,19 @@ define void @vpscatter_v2i64_truncstore_v2i16(<2 x i64> %val, <2 x i16*> %ptrs,
; RV32-LABEL: vpscatter_v2i64_truncstore_v2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT: vncvt.x.x.w v8, v8
+; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v8, v8
+; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_v2i64_truncstore_v2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT: vncvt.x.x.w v8, v8
+; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT: vncvt.x.x.w v8, v8
+; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -442,15 +442,15 @@ define void @vpscatter_v2i64_truncstore_v2i32(<2 x i64> %val, <2 x i32*> %ptrs,
; RV32-LABEL: vpscatter_v2i64_truncstore_v2i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT: vncvt.x.x.w v8, v8
+; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_v2i64_truncstore_v2i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT: vncvt.x.x.w v8, v8
+; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -779,7 +779,7 @@ define void @vpscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vsext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v12
+; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -805,7 +805,7 @@ define void @vpscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vzext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v12
+; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -854,7 +854,7 @@ define void @vpscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vsext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v12
+; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -880,7 +880,7 @@ define void @vpscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vzext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v12
+; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -928,7 +928,7 @@ define void @vpscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vsext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v12
+; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -954,7 +954,7 @@ define void @vpscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vzext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v12
+; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -979,7 +979,7 @@ define void @vpscatter_baseidx_v8i64(<8 x i64> %val, i64* %base, <8 x i64> %idxs
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v12, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v12
+; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1500,7 +1500,7 @@ define void @vpscatter_baseidx_sext_v8i8_v8f64(<8 x double> %val, double* %base,
; RV32-NEXT: vsext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v12
+; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1526,7 +1526,7 @@ define void @vpscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, double* %base,
; RV32-NEXT: vzext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v12
+; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1575,7 +1575,7 @@ define void @vpscatter_baseidx_sext_v8i16_v8f64(<8 x double> %val, double* %base
; RV32-NEXT: vsext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v12
+; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1601,7 +1601,7 @@ define void @vpscatter_baseidx_zext_v8i16_v8f64(<8 x double> %val, double* %base
; RV32-NEXT: vzext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v12
+; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1649,7 +1649,7 @@ define void @vpscatter_baseidx_sext_v8i32_v8f64(<8 x double> %val, double* %base
; RV32-NEXT: vsext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v12
+; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1675,7 +1675,7 @@ define void @vpscatter_baseidx_zext_v8i32_v8f64(<8 x double> %val, double* %base
; RV32-NEXT: vzext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v12
+; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1700,7 +1700,7 @@ define void @vpscatter_baseidx_v8f64(<8 x double> %val, double* %base, <8 x i64>
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v12, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v12
+; RV32-NEXT: vnsrl.wi v16, v12, 0
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1924,7 +1924,7 @@ define void @vpscatter_baseidx_sext_v32i32_v32f64(<32 x double> %val, double* %b
; RV32-NEXT: vsext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v8
+; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: addi a1, a2, -16
; RV32-NEXT: addi a4, sp, 16
@@ -1937,7 +1937,7 @@ define void @vpscatter_baseidx_sext_v32i32_v32f64(<32 x double> %val, double* %b
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v8
+; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu
@@ -2058,7 +2058,7 @@ define void @vpscatter_baseidx_zext_v32i32_v32f64(<32 x double> %val, double* %b
; RV32-NEXT: vzext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v8
+; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: addi a1, a2, -16
; RV32-NEXT: addi a4, sp, 16
@@ -2071,7 +2071,7 @@ define void @vpscatter_baseidx_zext_v32i32_v32f64(<32 x double> %val, double* %b
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v8
+; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu
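Every hunk in this patch makes the same mechanical substitution: the expected RISC-V output now spells element truncation as vnsrl.wi (narrowing logical shift right by a zero immediate) instead of the vncvt.x.x.w alias. In the RVV specification, vncvt.x.x.w vd, vs, vm is a pseudoinstruction for vnsrl.wx vd, vs, x0, vm, so both forms encode the same narrowing operation. Below is a minimal sketch of the kind of test being updated; the function name and CHECK pattern are illustrative, not taken from the patch.

; Hypothetical reduced test (not from this patch): truncating each element
; lowers to a narrowing shift by 0. vncvt.x.x.w vd, vs is the assembler alias
; for vnsrl.wx vd, vs, x0, which is equivalent to vnsrl.wi vd, vs, 0.
define <2 x i32> @trunc_sketch(<2 x i64> %v) {
; CHECK: vnsrl.wi {{v[0-9]+}}, v8, 0
  %t = trunc <2 x i64> %v to <2 x i32>
  ret <2 x i32> %t
}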
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll
@@ -408,7 +408,7 @@ define <vscale x 4 x i16> @test_signed_v4f64_v4i16(<vscale x 4 x double> %f) {
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v12
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v12, v16
+; CHECK-NEXT: vnsrl.wi v12, v16, 0
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
@@ -431,7 +431,7 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) {
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v24, v16
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v16, v24
+; CHECK-NEXT: vnsrl.wi v16, v24, 0
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmfne.vv v0, v8, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/RISCV/rvv/fptoui-sat.ll
@@ -249,7 +249,7 @@ define <vscale x 4 x i16> @test_signed_v4f64_v4i16(<vscale x 4 x double> %f) {
; CHECK32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK32-NEXT: vfncvt.rtz.xu.f.w v12, v8
; CHECK32-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK32-NEXT: vncvt.x.x.w v8, v12
+; CHECK32-NEXT: vnsrl.wi v8, v12, 0
; CHECK32-NEXT: ret
;
; CHECK64-LABEL: test_signed_v4f64_v4i16:
@@ -263,7 +263,7 @@ define <vscale x 4 x i16> @test_signed_v4f64_v4i16(<vscale x 4 x double> %f) {
; CHECK64-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK64-NEXT: vfncvt.rtz.xu.f.w v12, v8
; CHECK64-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK64-NEXT: vncvt.x.x.w v8, v12
+; CHECK64-NEXT: vnsrl.wi v8, v12, 0
; CHECK64-NEXT: ret
%x = call <vscale x 4 x i16> @llvm.fptoui.sat.nxv4f64.nxv4i16(<vscale x 4 x double> %f)
ret <vscale x 4 x i16> %x
@@ -281,7 +281,7 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) {
; CHECK32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK32-NEXT: vfncvt.rtz.xu.f.w v16, v8
; CHECK32-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK32-NEXT: vncvt.x.x.w v8, v16
+; CHECK32-NEXT: vnsrl.wi v8, v16, 0
; CHECK32-NEXT: ret
;
; CHECK64-LABEL: test_signed_v8f64_v8i16:
@@ -295,7 +295,7 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) {
; CHECK64-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK64-NEXT: vfncvt.rtz.xu.f.w v16, v8
; CHECK64-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK64-NEXT: vncvt.x.x.w v8, v16
+; CHECK64-NEXT: vnsrl.wi v8, v16, 0
; CHECK64-NEXT: ret
%x = call <vscale x 8 x i16> @llvm.fptoui.sat.nxv8f64.nxv8i16(<vscale x 8 x double> %f)
ret <vscale x 8 x i16> %x
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll
@@ -6,9 +6,9 @@ define <vscale x 4 x i5> @trunc_nxv4i32_to_nxv4i5(<vscale x 4 x i32> %a) {
; CHECK-LABEL: trunc_nxv4i32_to_nxv4i5:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v10, v8
+; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%v = trunc <vscale x 4 x i32> %a to <vscale x 4 x i5>
ret <vscale x 4 x i5> %v
@@ -18,9 +18,9 @@ define <vscale x 1 x i5> @trunc_nxv1i32_to_nxv1i5(<vscale x 1 x i32> %a) {
; CHECK-LABEL: trunc_nxv1i32_to_nxv1i5:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v = trunc <vscale x 1 x i32> %a to <vscale x 1 x i5>
ret <vscale x 1 x i5> %v
28 changes: 14 additions & 14 deletions llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -1038,7 +1038,7 @@ define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i8_nxv8i64(i64* %base, <vsca
; RV32-NEXT: vsext.vf8 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v8
+; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1065,7 +1065,7 @@ define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i8_nxv8i64(i64* %base, <vsca
; RV32-NEXT: vzext.vf8 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v8
+; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1116,7 +1116,7 @@ define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i16_nxv8i64(i64* %base, <vsc
; RV32-NEXT: vsext.vf4 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v8
+; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1143,7 +1143,7 @@ define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i16_nxv8i64(i64* %base, <vsc
; RV32-NEXT: vzext.vf4 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v8
+; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1193,7 +1193,7 @@ define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i32_nxv8i64(i64* %base, <vsc
; RV32-NEXT: vsext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v8
+; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1220,7 +1220,7 @@ define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i32_nxv8i64(i64* %base, <vsc
; RV32-NEXT: vzext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v8
+; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1246,7 +1246,7 @@ define <vscale x 8 x i64> @mgather_baseidx_nxv8i64(i64* %base, <vscale x 8 x i64
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v8
+; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1969,7 +1969,7 @@ define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i8_nxv8f64(double* %base,
; RV32-NEXT: vsext.vf8 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v8
+; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1996,7 +1996,7 @@ define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i8_nxv8f64(double* %base,
; RV32-NEXT: vzext.vf8 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v8
+; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -2047,7 +2047,7 @@ define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i16_nxv8f64(double* %base
; RV32-NEXT: vsext.vf4 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v8
+; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -2074,7 +2074,7 @@ define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i16_nxv8f64(double* %base
; RV32-NEXT: vzext.vf4 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v8
+; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -2124,7 +2124,7 @@ define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i32_nxv8f64(double* %base
; RV32-NEXT: vsext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v8
+; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -2151,7 +2151,7 @@ define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i32_nxv8f64(double* %base
; RV32-NEXT: vzext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v8
+; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -2177,7 +2177,7 @@ define <vscale x 8 x double> @mgather_baseidx_nxv8f64(double* %base, <vscale x 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v8
+; RV32-NEXT: vnsrl.wi v24, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
68 changes: 34 additions & 34 deletions llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
@@ -44,14 +44,14 @@ define void @mscatter_nxv2i16_truncstore_nxv2i8(<vscale x 2 x i16> %val, <vscale
; RV32-LABEL: mscatter_nxv2i16_truncstore_nxv2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v8, v8
+; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i16_truncstore_nxv2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; RV64-NEXT: vncvt.x.x.w v8, v8
+; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i16> %val to <vscale x 2 x i8>
@@ -63,18 +63,18 @@ define void @mscatter_nxv2i32_truncstore_nxv2i8(<vscale x 2 x i32> %val, <vscale
; RV32-LABEL: mscatter_nxv2i32_truncstore_nxv2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; RV32-NEXT: vncvt.x.x.w v8, v8
+; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v8, v8
+; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; RV64-NEXT: vncvt.x.x.w v8, v8
+; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV64-NEXT: vncvt.x.x.w v8, v8
+; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i32> %val to <vscale x 2 x i8>
@@ -86,22 +86,22 @@ define void @mscatter_nxv2i64_truncstore_nxv2i8(<vscale x 2 x i64> %val, <vscale
; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; RV32-NEXT: vncvt.x.x.w v11, v8
+; RV32-NEXT: vnsrl.wi v11, v8, 0
; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV32-NEXT: vncvt.x.x.w v8, v11
+; RV32-NEXT: vnsrl.wi v8, v11, 0
; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v8, v8
+; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; RV64-NEXT: vncvt.x.x.w v12, v8
+; RV64-NEXT: vnsrl.wi v12, v8, 0
; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV64-NEXT: vncvt.x.x.w v8, v12
+; RV64-NEXT: vnsrl.wi v8, v12, 0
; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV64-NEXT: vncvt.x.x.w v8, v8
+; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i8>
@@ -232,14 +232,14 @@ define void @mscatter_nxv2i32_truncstore_nxv2i16(<vscale x 2 x i32> %val, <vscal
; RV32-LABEL: mscatter_nxv2i32_truncstore_nxv2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; RV32-NEXT: vncvt.x.x.w v8, v8
+; RV32-NEXT: vnsrl.wi v8, v8, 0
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; RV64-NEXT: vncvt.x.x.w v8, v8
+; RV64-NEXT: vnsrl.wi v8, v8, 0
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i32> %val to <vscale x 2 x i16>
@@ -251,18 +251,18 @@ define void @mscatter_nxv2i64_truncstore_nxv2i16(<vscale x 2 x i64> %val, <vscal
; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; RV32-NEXT: vncvt.x.x.w v11, v8
+; RV32-NEXT: vnsrl.wi v11, v8, 0
; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV32-NEXT: vncvt.x.x.w v8, v11
+; RV32-NEXT: vnsrl.wi v8, v11, 0
; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; RV64-NEXT: vncvt.x.x.w v12, v8
+; RV64-NEXT: vnsrl.wi v12, v8, 0
; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV64-NEXT: vncvt.x.x.w v8, v12
+; RV64-NEXT: vnsrl.wi v8, v12, 0
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i16>
@@ -466,14 +466,14 @@ define void @mscatter_nxv2i64_truncstore_nxv2i32(<vscale x 2 x i64> %val, <vscal
; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; RV32-NEXT: vncvt.x.x.w v11, v8
+; RV32-NEXT: vnsrl.wi v11, v8, 0
; RV32-NEXT: vsoxei32.v v11, (zero), v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; RV64-NEXT: vncvt.x.x.w v12, v8
+; RV64-NEXT: vnsrl.wi v12, v8, 0
; RV64-NEXT: vsoxei64.v v12, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i32>
@@ -827,7 +827,7 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, i64*
; RV32-NEXT: vsext.vf8 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -852,7 +852,7 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, i64*
; RV32-NEXT: vzext.vf8 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -899,7 +899,7 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, i64*
; RV32-NEXT: vsext.vf4 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -924,7 +924,7 @@ define void @mscatter_baseidx_zext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, i64*
; RV32-NEXT: vzext.vf4 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -970,7 +970,7 @@ define void @mscatter_baseidx_sext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, i64*
; RV32-NEXT: vsext.vf2 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -995,7 +995,7 @@ define void @mscatter_baseidx_zext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, i64*
; RV32-NEXT: vzext.vf2 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1019,7 +1019,7 @@ define void @mscatter_baseidx_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vsca
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1609,7 +1609,7 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, do
; RV32-NEXT: vsext.vf8 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1634,7 +1634,7 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, do
; RV32-NEXT: vzext.vf8 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1681,7 +1681,7 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, d
; RV32-NEXT: vsext.vf4 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1706,7 +1706,7 @@ define void @mscatter_baseidx_zext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, d
; RV32-NEXT: vzext.vf4 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1752,7 +1752,7 @@ define void @mscatter_baseidx_sext_nxv8i32_nxv8f64(<vscale x 8 x double> %val, d
; RV32-NEXT: vsext.vf2 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1777,7 +1777,7 @@ define void @mscatter_baseidx_zext_nxv8i32_nxv8f64(<vscale x 8 x double> %val, d
; RV32-NEXT: vzext.vf2 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1801,7 +1801,7 @@ define void @mscatter_baseidx_nxv8f64(<vscale x 8 x double> %val, double* %base,
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v24, v16
+; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
@@ -229,7 +229,7 @@ define <vscale x 2 x i32> @vpmerge_vptrunc(<vscale x 2 x i32> %passthru, <vscale
; CHECK-LABEL: vpmerge_vptrunc:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10, v0.t
+; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT: ret
; MIR-LABEL: name: vpmerge_vptrunc
; MIR: bb.0 (%ir-block.0):
@@ -240,7 +240,7 @@ define <vscale x 2 x i32> @vpmerge_vptrunc(<vscale x 2 x i32> %passthru, <vscale
; MIR-NEXT: [[COPY2:%[0-9]+]]:vrm2 = COPY $v10m2
; MIR-NEXT: [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
; MIR-NEXT: $v0 = COPY [[COPY1]]
-; MIR-NEXT: early-clobber %4:vrnov0 = PseudoVNSRL_WX_M1_MASK [[COPY3]], [[COPY2]], $x0, $v0, [[COPY]], 5 /* e32 */, 0
+; MIR-NEXT: early-clobber %4:vrnov0 = PseudoVNSRL_WI_M1_MASK [[COPY3]], [[COPY2]], 0, $v0, [[COPY]], 5 /* e32 */, 0
; MIR-NEXT: $v8 = COPY %4
; MIR-NEXT: PseudoRET implicit $v8
%splat = insertelement <vscale x 2 x i1> poison, i1 -1, i32 0
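The vpmerge_vptrunc hunks above show the same rename at the MIR level: instruction selection now produces the immediate-form pseudo PseudoVNSRL_WI_M1_MASK with a shift operand of 0 where it previously used the register-form pseudo with $x0. A sketch of the VP intrinsic pattern such a test exercises follows; it is a hypothetical reduced form, with the intrinsic spelled after the in-tree llvm.vp.trunc naming.

declare <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)

; Truncate under a mask and an explicit vector length; after this change the
; expected lowering is a masked vnsrl.wi with a zero immediate.
define <vscale x 2 x i32> @vptrunc_sketch(<vscale x 2 x i64> %v, <vscale x 2 x i1> %m, i32 %evl) {
  %t = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64> %v, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %t
}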
68 changes: 34 additions & 34 deletions llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
@@ -658,7 +658,7 @@ define <vscale x 1 x i8> @vfptosi_nxv1f32_nxv1i8(<vscale x 1 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v9
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: ret
%evec = fptosi <vscale x 1 x float> %va to <vscale x 1 x i8>
ret <vscale x 1 x i8> %evec
@@ -670,7 +670,7 @@ define <vscale x 1 x i8> @vfptoui_nxv1f32_nxv1i8(<vscale x 1 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v9
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: ret
%evec = fptoui <vscale x 1 x float> %va to <vscale x 1 x i8>
ret <vscale x 1 x i8> %evec
@@ -770,7 +770,7 @@ define <vscale x 2 x i8> @vfptosi_nxv2f32_nxv2i8(<vscale x 2 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v9
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: ret
%evec = fptosi <vscale x 2 x float> %va to <vscale x 2 x i8>
ret <vscale x 2 x i8> %evec
@@ -782,7 +782,7 @@ define <vscale x 2 x i8> @vfptoui_nxv2f32_nxv2i8(<vscale x 2 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v9
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: ret
%evec = fptoui <vscale x 2 x float> %va to <vscale x 2 x i8>
ret <vscale x 2 x i8> %evec
@@ -882,7 +882,7 @@ define <vscale x 4 x i8> @vfptosi_nxv4f32_nxv4i8(<vscale x 4 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%evec = fptosi <vscale x 4 x float> %va to <vscale x 4 x i8>
ret <vscale x 4 x i8> %evec
@@ -894,7 +894,7 @@ define <vscale x 4 x i8> @vfptoui_nxv4f32_nxv4i8(<vscale x 4 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%evec = fptoui <vscale x 4 x float> %va to <vscale x 4 x i8>
ret <vscale x 4 x i8> %evec
@@ -994,7 +994,7 @@ define <vscale x 8 x i8> @vfptosi_nxv8f32_nxv8i8(<vscale x 8 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v12
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: ret
%evec = fptosi <vscale x 8 x float> %va to <vscale x 8 x i8>
ret <vscale x 8 x i8> %evec
@@ -1006,7 +1006,7 @@ define <vscale x 8 x i8> @vfptoui_nxv8f32_nxv8i8(<vscale x 8 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v12
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: ret
%evec = fptoui <vscale x 8 x float> %va to <vscale x 8 x i8>
ret <vscale x 8 x i8> %evec
@@ -1106,7 +1106,7 @@ define <vscale x 16 x i8> @vfptosi_nxv16f32_nxv16i8(<vscale x 16 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v16
+; CHECK-NEXT: vnsrl.wi v8, v16, 0
; CHECK-NEXT: ret
%evec = fptosi <vscale x 16 x float> %va to <vscale x 16 x i8>
ret <vscale x 16 x i8> %evec
@@ -1118,7 +1118,7 @@ define <vscale x 16 x i8> @vfptoui_nxv16f32_nxv16i8(<vscale x 16 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v16
+; CHECK-NEXT: vnsrl.wi v8, v16, 0
; CHECK-NEXT: ret
%evec = fptoui <vscale x 16 x float> %va to <vscale x 16 x i8>
ret <vscale x 16 x i8> %evec
@@ -1196,9 +1196,9 @@ define <vscale x 1 x i8> @vfptosi_nxv1f64_nxv1i8(<vscale x 1 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v9
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%evec = fptosi <vscale x 1 x double> %va to <vscale x 1 x i8>
ret <vscale x 1 x i8> %evec
@@ -1210,9 +1210,9 @@ define <vscale x 1 x i8> @vfptoui_nxv1f64_nxv1i8(<vscale x 1 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v9
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%evec = fptoui <vscale x 1 x double> %va to <vscale x 1 x i8>
ret <vscale x 1 x i8> %evec
@@ -1224,7 +1224,7 @@ define <vscale x 1 x i16> @vfptosi_nxv1f64_nxv1i16(<vscale x 1 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v9
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: ret
%evec = fptosi <vscale x 1 x double> %va to <vscale x 1 x i16>
ret <vscale x 1 x i16> %evec
@@ -1236,7 +1236,7 @@ define <vscale x 1 x i16> @vfptoui_nxv1f64_nxv1i16(<vscale x 1 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v9
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: ret
%evec = fptoui <vscale x 1 x double> %va to <vscale x 1 x i16>
ret <vscale x 1 x i16> %evec
@@ -1314,9 +1314,9 @@ define <vscale x 2 x i8> @vfptosi_nxv2f64_nxv2i8(<vscale x 2 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%evec = fptosi <vscale x 2 x double> %va to <vscale x 2 x i8>
ret <vscale x 2 x i8> %evec
@@ -1328,9 +1328,9 @@ define <vscale x 2 x i8> @vfptoui_nxv2f64_nxv2i8(<vscale x 2 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%evec = fptoui <vscale x 2 x double> %va to <vscale x 2 x i8>
ret <vscale x 2 x i8> %evec
@@ -1342,7 +1342,7 @@ define <vscale x 2 x i16> @vfptosi_nxv2f64_nxv2i16(<vscale x 2 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%evec = fptosi <vscale x 2 x double> %va to <vscale x 2 x i16>
ret <vscale x 2 x i16> %evec
@@ -1354,7 +1354,7 @@ define <vscale x 2 x i16> @vfptoui_nxv2f64_nxv2i16(<vscale x 2 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%evec = fptoui <vscale x 2 x double> %va to <vscale x 2 x i16>
ret <vscale x 2 x i16> %evec
@@ -1432,9 +1432,9 @@ define <vscale x 4 x i8> @vfptosi_nxv4f64_nxv4i8(<vscale x 4 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v12
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%evec = fptosi <vscale x 4 x double> %va to <vscale x 4 x i8>
ret <vscale x 4 x i8> %evec
@@ -1446,9 +1446,9 @@ define <vscale x 4 x i8> @vfptoui_nxv4f64_nxv4i8(<vscale x 4 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v12
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%evec = fptoui <vscale x 4 x double> %va to <vscale x 4 x i8>
ret <vscale x 4 x i8> %evec
@@ -1460,7 +1460,7 @@ define <vscale x 4 x i16> @vfptosi_nxv4f64_nxv4i16(<vscale x 4 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v12
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: ret
%evec = fptosi <vscale x 4 x double> %va to <vscale x 4 x i16>
ret <vscale x 4 x i16> %evec
@@ -1472,7 +1472,7 @@ define <vscale x 4 x i16> @vfptoui_nxv4f64_nxv4i16(<vscale x 4 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v12
+; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: ret
%evec = fptoui <vscale x 4 x double> %va to <vscale x 4 x i16>
ret <vscale x 4 x i16> %evec
@@ -1550,9 +1550,9 @@ define <vscale x 8 x i8> @vfptosi_nxv8f64_nxv8i8(<vscale x 8 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v10, v16
+; CHECK-NEXT: vnsrl.wi v10, v16, 0
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%evec = fptosi <vscale x 8 x double> %va to <vscale x 8 x i8>
ret <vscale x 8 x i8> %evec
@@ -1564,9 +1564,9 @@ define <vscale x 8 x i8> @vfptoui_nxv8f64_nxv8i8(<vscale x 8 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v10, v16
+; CHECK-NEXT: vnsrl.wi v10, v16, 0
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%evec = fptoui <vscale x 8 x double> %va to <vscale x 8 x i8>
ret <vscale x 8 x i8> %evec
@@ -1578,7 +1578,7 @@ define <vscale x 8 x i16> @vfptosi_nxv8f64_nxv8i16(<vscale x 8 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v16
+; CHECK-NEXT: vnsrl.wi v8, v16, 0
; CHECK-NEXT: ret
%evec = fptosi <vscale x 8 x double> %va to <vscale x 8 x i16>
ret <vscale x 8 x i16> %evec
@@ -1590,7 +1590,7 @@ define <vscale x 8 x i16> @vfptoui_nxv8f64_nxv8i16(<vscale x 8 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v16
+; CHECK-NEXT: vnsrl.wi v8, v16, 0
; CHECK-NEXT: ret
%evec = fptoui <vscale x 8 x double> %va to <vscale x 8 x i16>
ret <vscale x 8 x i16> %evec
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
@@ -119,7 +119,7 @@ define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f32(<vscale x 2 x float> %va, <vsca
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v9, v0.t
+; CHECK-NEXT: vnsrl.wi v8, v9, 0, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x i8> %v
@@ -131,7 +131,7 @@ define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f32_unmasked(<vscale x 2 x float> %
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v9
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
ret <vscale x 2 x i8> %v
@@ -215,9 +215,9 @@ define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f64(<vscale x 2 x double> %va, <vsc
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10, v0.t
+; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t
+; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x i8> %v
@@ -229,9 +229,9 @@ define <vscale x 2 x i8> @vfptosi_nxv2i8_nxv2f64_unmasked(<vscale x 2 x double>
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.fptosi.nxv2i8.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
ret <vscale x 2 x i8> %v
@@ -245,7 +245,7 @@ define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f64(<vscale x 2 x double> %va, <v
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10, v0.t
+; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x i16> %v
@@ -257,7 +257,7 @@ define <vscale x 2 x i16> @vfptosi_nxv2i16_nxv2f64_unmasked(<vscale x 2 x double
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
ret <vscale x 2 x i16> %v
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
@@ -119,7 +119,7 @@ define <vscale x 2 x i8> @vfptoui_nxv2i8_nxv2f32(<vscale x 2 x float> %va, <vsca
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v9, v0.t
+; CHECK-NEXT: vnsrl.wi v8, v9, 0, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x i8> %v
@@ -131,7 +131,7 @@ define <vscale x 2 x i8> @vfptoui_nxv2i8_nxv2f32_unmasked(<vscale x 2 x float> %
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v9
+; CHECK-NEXT: vnsrl.wi v8, v9, 0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
ret <vscale x 2 x i8> %v
@@ -215,9 +215,9 @@ define <vscale x 2 x i8> @vfptoui_nxv2i8_nxv2f64(<vscale x 2 x double> %va, <vsc
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10, v0.t
+; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t
+; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x i8> %v
@@ -229,9 +229,9 @@ define <vscale x 2 x i8> @vfptoui_nxv2i8_nxv2f64_unmasked(<vscale x 2 x double>
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v8
+; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.fptoui.nxv2i8.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
ret <vscale x 2 x i8> %v
@@ -245,7 +245,7 @@ define <vscale x 2 x i16> @vfptoui_nxv2i16_nxv2f64(<vscale x 2 x double> %va, <v
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10, v0.t
+; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x i16> %v
@@ -257,7 +257,7 @@ define <vscale x 2 x i16> @vfptoui_nxv2i16_nxv2f64_unmasked(<vscale x 2 x double
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vncvt.x.x.w v8, v10
+; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.fptoui.nxv2i16.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %evl)
ret <vscale x 2 x i16> %v
50 changes: 25 additions & 25 deletions llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
@@ -1056,7 +1056,7 @@ define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i8_nxv8i64(i64* %base, <vsc
; RV32-NEXT: vsext.vf8 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v8
+; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1082,7 +1082,7 @@ define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i8_nxv8i64(i64* %base, <vsc
; RV32-NEXT: vzext.vf8 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v8
+; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1131,7 +1131,7 @@ define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i16_nxv8i64(i64* %base, <vs
; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v8
+; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1157,7 +1157,7 @@ define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i16_nxv8i64(i64* %base, <vs
; RV32-NEXT: vzext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v8
+; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1205,7 +1205,7 @@ define <vscale x 8 x i64> @vpgather_baseidx_sext_nxv8i32_nxv8i64(i64* %base, <vs
; RV32-NEXT: vsext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v8
+; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1231,7 +1231,7 @@ define <vscale x 8 x i64> @vpgather_baseidx_zext_nxv8i32_nxv8i64(i64* %base, <vs
; RV32-NEXT: vzext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vncvt.x.x.w v16, v8
+; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1256,7 +1256,7 @@ define <vscale x 8 x i64> @vpgather_baseidx_nxv8i64(i64* %base, <vscale x 8 x i6
; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1851,7 +1851,7 @@ define <vscale x 6 x double> @vpgather_baseidx_sext_nxv6i8_nxv6f64(double* %base
; RV32-NEXT: vsext.vf8 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1877,7 +1877,7 @@ define <vscale x 6 x double> @vpgather_baseidx_zext_nxv6i8_nxv6f64(double* %base
; RV32-NEXT: vzext.vf8 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1926,7 +1926,7 @@ define <vscale x 6 x double> @vpgather_baseidx_sext_nxv6i16_nxv6f64(double* %bas
; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1952,7 +1952,7 @@ define <vscale x 6 x double> @vpgather_baseidx_zext_nxv6i16_nxv6f64(double* %bas
; RV32-NEXT: vzext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -2000,7 +2000,7 @@ define <vscale x 6 x double> @vpgather_baseidx_sext_nxv6i32_nxv6f64(double* %bas
; RV32-NEXT: vsext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -2026,7 +2026,7 @@ define <vscale x 6 x double> @vpgather_baseidx_zext_nxv6i32_nxv6f64(double* %bas
; RV32-NEXT: vzext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -2051,7 +2051,7 @@ define <vscale x 6 x double> @vpgather_baseidx_nxv6f64(double* %base, <vscale x
; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -2117,7 +2117,7 @@ define <vscale x 8 x double> @vpgather_baseidx_sext_nxv8i8_nxv8f64(double* %base
; RV32-NEXT: vsext.vf8 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -2143,7 +2143,7 @@ define <vscale x 8 x double> @vpgather_baseidx_zext_nxv8i8_nxv8f64(double* %base
; RV32-NEXT: vzext.vf8 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -2192,7 +2192,7 @@ define <vscale x 8 x double> @vpgather_baseidx_sext_nxv8i16_nxv8f64(double* %bas
; RV32-NEXT: vsext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -2218,7 +2218,7 @@ define <vscale x 8 x double> @vpgather_baseidx_zext_nxv8i16_nxv8f64(double* %bas
; RV32-NEXT: vzext.vf4 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -2266,7 +2266,7 @@ define <vscale x 8 x double> @vpgather_baseidx_sext_nxv8i32_nxv8f64(double* %bas
; RV32-NEXT: vsext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -2292,7 +2292,7 @@ define <vscale x 8 x double> @vpgather_baseidx_zext_nxv8i32_nxv8f64(double* %bas
; RV32-NEXT: vzext.vf2 v16, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -2317,7 +2317,7 @@ define <vscale x 8 x double> @vpgather_baseidx_nxv8f64(double* %base, <vscale x
; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -2466,7 +2466,7 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(double* %
; RV32-NEXT: vsext.vf4 v24, v10
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu
; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: srli a3, a2, 3
@@ -2480,7 +2480,7 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(double* %
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, a4, e32, m4, ta, mu
; RV32-NEXT: vncvt.x.x.w v24, v16
; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2536,7 +2536,7 @@ define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(double* %
; RV32-NEXT: vzext.vf4 v24, v10
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu
; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: srli a3, a2, 3
@@ -2550,7 +2550,7 @@ define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(double* %
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, a4, e32, m4, ta, mu
; RV32-NEXT: vncvt.x.x.w v24, v16
; RV32-NEXT: vnsrl.wi v24, v16, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
90 changes: 45 additions & 45 deletions llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll

Large diffs are not rendered by default.

76 changes: 38 additions & 38 deletions llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
@@ -8,7 +8,7 @@ define <vscale x 2 x i7> @vtrunc_nxv2i7_nxv2i16(<vscale x 2 x i16> %a, <vscale x
; CHECK-LABEL: vtrunc_nxv2i7_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i7> @llvm.vp.trunc.nxv2i7.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> %m, i32 %vl)
ret <vscale x 2 x i7> %v
@@ -20,7 +20,7 @@ define <vscale x 2 x i8> @vtrunc_nxv2i8_nxv2i15(<vscale x 2 x i15> %a, <vscale x
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i15:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i15(<vscale x 2 x i15> %a, <vscale x 2 x i1> %m, i32 %vl)
ret <vscale x 2 x i8> %v
@@ -32,7 +32,7 @@ define <vscale x 2 x i8> @vtrunc_nxv2i8_nxv2i16(<vscale x 2 x i16> %a, <vscale x
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> %m, i32 %vl)
ret <vscale x 2 x i8> %v
@@ -42,7 +42,7 @@ define <vscale x 2 x i8> @vtrunc_nxv2i8_nxv2i16_unmasked(<vscale x 2 x i16> %a,
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
ret <vscale x 2 x i8> %v
@@ -54,9 +54,9 @@ define <vscale x 2 x i8> @vtrunc_nxv2i8_nxv2i32(<vscale x 2 x i32> %a, <vscale x
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i32 %vl)
ret <vscale x 2 x i8> %v
@@ -66,9 +66,9 @@ define <vscale x 2 x i8> @vtrunc_nxv2i8_nxv2i32_unmasked(<vscale x 2 x i32> %a,
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
ret <vscale x 2 x i8> %v
@@ -80,11 +80,11 @@ define <vscale x 2 x i8> @vtrunc_nxv2i8_nxv2i64(<vscale x 2 x i64> %a, <vscale x
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vncvt.x.x.w v10, v8, v0.t
; CHECK-NEXT: vnsrl.wi v10, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v10, v0.t
; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 %vl)
ret <vscale x 2 x i8> %v
@@ -94,11 +94,11 @@ define <vscale x 2 x i8> @vtrunc_nxv2i8_nxv2i64_unmasked(<vscale x 2 x i64> %a,
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vncvt.x.x.w v10, v8
; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v10
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
ret <vscale x 2 x i8> %v
@@ -110,7 +110,7 @@ define <vscale x 2 x i16> @vtrunc_nxv2i16_nxv2i32(<vscale x 2 x i32> %a, <vscale
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8, v0.t
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i32 %vl)
ret <vscale x 2 x i16> %v
@@ -120,7 +120,7 @@ define <vscale x 2 x i16> @vtrunc_nxv2i16_nxv2i32_unmasked(<vscale x 2 x i32> %a
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
ret <vscale x 2 x i16> %v
@@ -132,9 +132,9 @@ define <vscale x 2 x i16> @vtrunc_nxv2i16_nxv2i64(<vscale x 2 x i64> %a, <vscale
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vncvt.x.x.w v10, v8, v0.t
; CHECK-NEXT: vnsrl.wi v10, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v10, v0.t
; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 %vl)
ret <vscale x 2 x i16> %v
@@ -144,9 +144,9 @@ define <vscale x 2 x i16> @vtrunc_nxv2i16_nxv2i64_unmasked(<vscale x 2 x i64> %a
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vncvt.x.x.w v10, v8
; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v10
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
ret <vscale x 2 x i16> %v
@@ -169,18 +169,18 @@ define <vscale x 15 x i16> @vtrunc_nxv15i16_nxv15i64(<vscale x 15 x i64> %a, <vs
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB12_2:
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v28, v16, v0.t
; CHECK-NEXT: vnsrl.wi v28, v16, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v18, v28, v0.t
; CHECK-NEXT: vnsrl.wi v18, v28, 0, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB12_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB12_4:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vncvt.x.x.w v20, v8, v0.t
; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v16, v20, v0.t
; CHECK-NEXT: vnsrl.wi v16, v20, 0, v0.t
; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 15 x i16> @llvm.vp.trunc.nxv15i16.nxv15i64(<vscale x 15 x i64> %a, <vscale x 15 x i1> %m, i32 %vl)
@@ -193,7 +193,7 @@ define <vscale x 2 x i32> @vtrunc_nxv2i32_nxv2i64(<vscale x 2 x i64> %a, <vscale
; CHECK-LABEL: vtrunc_nxv2i32_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vncvt.x.x.w v10, v8, v0.t
; CHECK-NEXT: vnsrl.wi v10, v8, 0, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 %vl)
@@ -204,7 +204,7 @@ define <vscale x 2 x i32> @vtrunc_nxv2i32_nxv2i64_unmasked(<vscale x 2 x i64> %a
; CHECK-LABEL: vtrunc_nxv2i32_nxv2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vncvt.x.x.w v10, v8
; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<vscale x 2 x i64> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
@@ -229,18 +229,18 @@ define <vscale x 32 x i7> @vtrunc_nxv32i7_nxv32i32(<vscale x 32 x i32> %a, <vsca
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB15_2:
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v28, v16, v0.t
; CHECK-NEXT: vnsrl.wi v28, v16, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v18, v28, v0.t
; CHECK-NEXT: vnsrl.wi v18, v28, 0, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB15_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB15_4:
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vncvt.x.x.w v20, v8, v0.t
; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v16, v20, v0.t
; CHECK-NEXT: vnsrl.wi v16, v20, 0, v0.t
; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 32 x i7> @llvm.vp.trunc.nxv32i7.nxv32i32(<vscale x 32 x i32> %a, <vscale x 32 x i1> %m, i32 %vl)
@@ -265,18 +265,18 @@ define <vscale x 32 x i8> @vtrunc_nxv32i8_nxv32i32(<vscale x 32 x i32> %a, <vsca
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB16_2:
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v28, v16, v0.t
; CHECK-NEXT: vnsrl.wi v28, v16, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v18, v28, v0.t
; CHECK-NEXT: vnsrl.wi v18, v28, 0, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB16_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB16_4:
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vncvt.x.x.w v20, v8, v0.t
; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v16, v20, v0.t
; CHECK-NEXT: vnsrl.wi v16, v20, 0, v0.t
; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.vp.trunc.nxv32i8.nxv32i32(<vscale x 32 x i32> %a, <vscale x 32 x i1> %m, i32 %vl)
@@ -318,7 +318,7 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: srli a7, a1, 2
; CHECK-NEXT: slli t0, a1, 3
; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v12, v16, v0.t
; CHECK-NEXT: vnsrl.wi v12, v16, 0, v0.t
; CHECK-NEXT: bltu a5, a1, .LBB17_6
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a5, a1
@@ -335,7 +335,7 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: add a5, sp, a5
; CHECK-NEXT: addi a5, a5, 16
; CHECK-NEXT: vl8re8.v v16, (a5) # Unknown-size Folded Reload
; CHECK-NEXT: vncvt.x.x.w v8, v16, v0.t
; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t
; CHECK-NEXT: bltu a2, a4, .LBB17_8
; CHECK-NEXT: # %bb.7:
; CHECK-NEXT: mv a6, a4
@@ -363,7 +363,7 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vncvt.x.x.w v20, v24, v0.t
; CHECK-NEXT: vnsrl.wi v20, v24, 0, v0.t
; CHECK-NEXT: bltu a6, a1, .LBB17_12
; CHECK-NEXT: # %bb.11:
; CHECK-NEXT: mv a6, a1
@@ -372,7 +372,7 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: vmv1r.v v0, v1
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vncvt.x.x.w v16, v24, v0.t
; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
88 changes: 44 additions & 44 deletions llvm/test/CodeGen/RISCV/rvv/vtruncs-sdnode.ll
@@ -6,7 +6,7 @@ define <vscale x 1 x i8> @vtrunc_nxv1i16_nxv1i8(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vtrunc_nxv1i16_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 1 x i16> %va to <vscale x 1 x i8>
ret <vscale x 1 x i8> %tvec
@@ -16,7 +16,7 @@ define <vscale x 2 x i8> @vtrunc_nxv2i16_nxv2i8(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 2 x i16> %va to <vscale x 2 x i8>
ret <vscale x 2 x i8> %tvec
@@ -26,7 +26,7 @@ define <vscale x 4 x i8> @vtrunc_nxv4i16_nxv4i8(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vtrunc_nxv4i16_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 4 x i16> %va to <vscale x 4 x i8>
ret <vscale x 4 x i8> %tvec
@@ -36,7 +36,7 @@ define <vscale x 8 x i8> @vtrunc_nxv8i16_nxv8i8(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vtrunc_nxv8i16_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vncvt.x.x.w v10, v8
; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%tvec = trunc <vscale x 8 x i16> %va to <vscale x 8 x i8>
@@ -47,7 +47,7 @@ define <vscale x 16 x i8> @vtrunc_nxv16i16_nxv16i8(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vtrunc_nxv16i16_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v12, v8
; CHECK-NEXT: vnsrl.wi v12, v8, 0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%tvec = trunc <vscale x 16 x i16> %va to <vscale x 16 x i8>
@@ -58,9 +58,9 @@ define <vscale x 1 x i8> @vtrunc_nxv1i32_nxv1i8(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv1i32_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 1 x i32> %va to <vscale x 1 x i8>
ret <vscale x 1 x i8> %tvec
@@ -70,7 +70,7 @@ define <vscale x 1 x i16> @vtrunc_nxv1i32_nxv1i16(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv1i32_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 1 x i32> %va to <vscale x 1 x i16>
ret <vscale x 1 x i16> %tvec
@@ -80,9 +80,9 @@ define <vscale x 2 x i8> @vtrunc_nxv2i32_nxv2i8(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv2i32_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 2 x i32> %va to <vscale x 2 x i8>
ret <vscale x 2 x i8> %tvec
@@ -92,7 +92,7 @@ define <vscale x 2 x i16> @vtrunc_nxv2i32_nxv2i16(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv2i32_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 2 x i32> %va to <vscale x 2 x i16>
ret <vscale x 2 x i16> %tvec
@@ -102,9 +102,9 @@ define <vscale x 4 x i8> @vtrunc_nxv4i32_nxv4i8(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv4i32_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vncvt.x.x.w v10, v8
; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v10
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 4 x i32> %va to <vscale x 4 x i8>
ret <vscale x 4 x i8> %tvec
@@ -114,7 +114,7 @@ define <vscale x 4 x i16> @vtrunc_nxv4i32_nxv4i16(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv4i32_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vncvt.x.x.w v10, v8
; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%tvec = trunc <vscale x 4 x i32> %va to <vscale x 4 x i16>
@@ -125,9 +125,9 @@ define <vscale x 8 x i8> @vtrunc_nxv8i32_nxv8i8(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv8i32_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v12, v8
; CHECK-NEXT: vnsrl.wi v12, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v12
; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 8 x i32> %va to <vscale x 8 x i8>
ret <vscale x 8 x i8> %tvec
@@ -137,7 +137,7 @@ define <vscale x 8 x i16> @vtrunc_nxv8i32_nxv8i16(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv8i32_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v12, v8
; CHECK-NEXT: vnsrl.wi v12, v8, 0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%tvec = trunc <vscale x 8 x i32> %va to <vscale x 8 x i16>
@@ -148,9 +148,9 @@ define <vscale x 16 x i8> @vtrunc_nxv16i32_nxv16i8(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv16i32_nxv16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v16, v8
; CHECK-NEXT: vnsrl.wi v16, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v16
; CHECK-NEXT: vnsrl.wi v8, v16, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 16 x i32> %va to <vscale x 16 x i8>
ret <vscale x 16 x i8> %tvec
@@ -160,7 +160,7 @@ define <vscale x 16 x i16> @vtrunc_nxv16i32_nxv16i16(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vtrunc_nxv16i32_nxv16i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v16, v8
; CHECK-NEXT: vnsrl.wi v16, v8, 0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%tvec = trunc <vscale x 16 x i32> %va to <vscale x 16 x i16>
@@ -171,11 +171,11 @@ define <vscale x 1 x i8> @vtrunc_nxv1i64_nxv1i8(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv1i64_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i8>
ret <vscale x 1 x i8> %tvec
@@ -185,9 +185,9 @@ define <vscale x 1 x i16> @vtrunc_nxv1i64_nxv1i16(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv1i64_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i16>
ret <vscale x 1 x i16> %tvec
@@ -197,7 +197,7 @@ define <vscale x 1 x i32> @vtrunc_nxv1i64_nxv1i32(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv1i64_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 1 x i64> %va to <vscale x 1 x i32>
ret <vscale x 1 x i32> %tvec
@@ -207,11 +207,11 @@ define <vscale x 2 x i8> @vtrunc_nxv2i64_nxv2i8(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv2i64_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vncvt.x.x.w v10, v8
; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v10
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i8>
ret <vscale x 2 x i8> %tvec
@@ -221,9 +221,9 @@ define <vscale x 2 x i16> @vtrunc_nxv2i64_nxv2i16(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv2i64_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vncvt.x.x.w v10, v8
; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v10
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i16>
ret <vscale x 2 x i16> %tvec
@@ -233,7 +233,7 @@ define <vscale x 2 x i32> @vtrunc_nxv2i64_nxv2i32(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv2i64_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vncvt.x.x.w v10, v8
; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%tvec = trunc <vscale x 2 x i64> %va to <vscale x 2 x i32>
@@ -244,11 +244,11 @@ define <vscale x 4 x i8> @vtrunc_nxv4i64_nxv4i8(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv4i64_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v12, v8
; CHECK-NEXT: vnsrl.wi v12, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v12
; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i8>
ret <vscale x 4 x i8> %tvec
@@ -258,9 +258,9 @@ define <vscale x 4 x i16> @vtrunc_nxv4i64_nxv4i16(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv4i64_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v12, v8
; CHECK-NEXT: vnsrl.wi v12, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v12
; CHECK-NEXT: vnsrl.wi v8, v12, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i16>
ret <vscale x 4 x i16> %tvec
@@ -270,7 +270,7 @@ define <vscale x 4 x i32> @vtrunc_nxv4i64_nxv4i32(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv4i64_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v12, v8
; CHECK-NEXT: vnsrl.wi v12, v8, 0
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%tvec = trunc <vscale x 4 x i64> %va to <vscale x 4 x i32>
@@ -281,11 +281,11 @@ define <vscale x 8 x i8> @vtrunc_nxv8i64_nxv8i8(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv8i64_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v16, v8
; CHECK-NEXT: vnsrl.wi v16, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v10, v16
; CHECK-NEXT: vnsrl.wi v10, v16, 0
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v10
; CHECK-NEXT: vnsrl.wi v8, v10, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i8>
ret <vscale x 8 x i8> %tvec
@@ -295,9 +295,9 @@ define <vscale x 8 x i16> @vtrunc_nxv8i64_nxv8i16(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv8i64_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v16, v8
; CHECK-NEXT: vnsrl.wi v16, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vncvt.x.x.w v8, v16
; CHECK-NEXT: vnsrl.wi v8, v16, 0
; CHECK-NEXT: ret
%tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i16>
ret <vscale x 8 x i16> %tvec
@@ -307,7 +307,7 @@ define <vscale x 8 x i32> @vtrunc_nxv8i64_nxv8i32(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vtrunc_nxv8i64_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vncvt.x.x.w v16, v8
; CHECK-NEXT: vnsrl.wi v16, v8, 0
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: ret
%tvec = trunc <vscale x 8 x i64> %va to <vscale x 8 x i32>
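Note on the substitution running through this diff: vncvt.x.x.w vd, vs is the V-extension assembler pseudoinstruction for vnsrl.wx vd, vs, x0, and a narrowing logical shift right by zero bits simply truncates each 2*SEW source element down to SEW. The vnsrl.wi vd, vs, 0 spelling the tests now expect is therefore functionally identical; only the canonical form of the zero shift amount changes. A minimal sketch of the equivalence, with hypothetical registers and trip count not taken from any test above:

    # a0 holds the element count; narrow e32 elements in v8 to e16 in v9
    vsetvli t0, a0, e16, mf2, ta, mu   # SEW=16, so the wide source is read at 2*SEW=32
    vncvt.x.x.w v9, v8                 # pseudoinstruction: truncating narrow
    vnsrl.wx    v9, v8, x0             # expansion of the pseudoinstruction
    vnsrl.wi    v9, v8, 0              # equivalent immediate form checked after this change

All three instructions leave the same truncated values in v9.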