56 changes: 28 additions & 28 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -965,7 +965,7 @@ define <8 x i64> @vpgather_baseidx_sext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <
; RV32-NEXT: vsext.vf8 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -991,7 +991,7 @@ define <8 x i64> @vpgather_baseidx_zext_v8i8_v8i64(i64* %base, <8 x i8> %idxs, <
; RV32-NEXT: vzext.vf8 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1040,7 +1040,7 @@ define <8 x i64> @vpgather_baseidx_sext_v8i16_v8i64(i64* %base, <8 x i16> %idxs,
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1066,7 +1066,7 @@ define <8 x i64> @vpgather_baseidx_zext_v8i16_v8i64(i64* %base, <8 x i16> %idxs,
; RV32-NEXT: vzext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1114,7 +1114,7 @@ define <8 x i64> @vpgather_baseidx_sext_v8i32_v8i64(i64* %base, <8 x i32> %idxs,
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1140,7 +1140,7 @@ define <8 x i64> @vpgather_baseidx_zext_v8i32_v8i64(i64* %base, <8 x i32> %idxs,
; RV32-NEXT: vzext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1165,7 +1165,7 @@ define <8 x i64> @vpgather_baseidx_v8i64(i64* %base, <8 x i64> %idxs, <8 x i1> %
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1702,7 +1702,7 @@ define <8 x double> @vpgather_baseidx_sext_v8i8_v8f64(double* %base, <8 x i8> %i
; RV32-NEXT: vsext.vf8 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1728,7 +1728,7 @@ define <8 x double> @vpgather_baseidx_zext_v8i8_v8f64(double* %base, <8 x i8> %i
; RV32-NEXT: vzext.vf8 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1777,7 +1777,7 @@ define <8 x double> @vpgather_baseidx_sext_v8i16_v8f64(double* %base, <8 x i16>
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1803,7 +1803,7 @@ define <8 x double> @vpgather_baseidx_zext_v8i16_v8f64(double* %base, <8 x i16>
; RV32-NEXT: vzext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1851,7 +1851,7 @@ define <8 x double> @vpgather_baseidx_sext_v8i32_v8f64(double* %base, <8 x i32>
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1877,7 +1877,7 @@ define <8 x double> @vpgather_baseidx_zext_v8i32_v8f64(double* %base, <8 x i32>
; RV32-NEXT: vzext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -1902,7 +1902,7 @@ define <8 x double> @vpgather_baseidx_v8f64(double* %base, <8 x i64> %idxs, <8 x
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v8, zero
+; RV32-NEXT: vncvt.x.x.w v12, v8
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
@@ -2055,7 +2055,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i8_v32f64(double* %base, <32 x i8
; RV32-NEXT: vsext.vf8 v24, v8
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v16, zero
+; RV32-NEXT: vncvt.x.x.w v12, v16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v10, 2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
@@ -2068,7 +2068,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i8_v32f64(double* %base, <32 x i8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v24, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v4, v24, zero
+; RV32-NEXT: vncvt.x.x.w v4, v24
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v10
; RV32-NEXT: vluxei32.v v8, (a0), v4, v0.t
@@ -2128,7 +2128,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i8_v32f64(double* %base, <32 x i8
; RV32-NEXT: vzext.vf8 v24, v8
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v12, v16, zero
+; RV32-NEXT: vncvt.x.x.w v12, v16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v10, 2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
@@ -2141,7 +2141,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i8_v32f64(double* %base, <32 x i8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v24, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v4, v24, zero
+; RV32-NEXT: vncvt.x.x.w v4, v24
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v10
; RV32-NEXT: vluxei32.v v8, (a0), v4, v0.t
@@ -2265,7 +2265,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i16_v32f64(double* %base, <32 x i
; RV32-NEXT: vsext.vf4 v24, v8
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v16, zero
+; RV32-NEXT: vncvt.x.x.w v8, v16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v12, 2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
@@ -2278,7 +2278,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i16_v32f64(double* %base, <32 x i
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v24, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v4, v24, zero
+; RV32-NEXT: vncvt.x.x.w v4, v24
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: vluxei32.v v8, (a0), v4, v0.t
@@ -2338,7 +2338,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i16_v32f64(double* %base, <32 x i
; RV32-NEXT: vzext.vf4 v24, v8
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v16, zero
+; RV32-NEXT: vncvt.x.x.w v8, v16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v12, 2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
@@ -2351,7 +2351,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i16_v32f64(double* %base, <32 x i
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v24, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v4, v24, zero
+; RV32-NEXT: vncvt.x.x.w v4, v24
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: vluxei32.v v8, (a0), v4, v0.t
@@ -2474,7 +2474,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i32_v32f64(double* %base, <32 x i
; RV32-NEXT: vsext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v4, v8, zero
+; RV32-NEXT: vncvt.x.x.w v4, v8
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v1, 2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
@@ -2487,7 +2487,7 @@ define <32 x double> @vpgather_baseidx_sext_v32i32_v32f64(double* %base, <32 x i
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
@@ -2547,7 +2547,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(double* %base, <32 x i
; RV32-NEXT: vzext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v4, v8, zero
+; RV32-NEXT: vncvt.x.x.w v4, v8
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v1, 2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
@@ -2560,7 +2560,7 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(double* %base, <32 x i
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
@@ -2616,7 +2616,7 @@ define <32 x double> @vpgather_baseidx_v32f64(double* %base, <32 x i64> %idxs, <
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v28, v16, zero
+; RV32-NEXT: vncvt.x.x.w v28, v16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v24, 2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
@@ -2629,7 +2629,7 @@ define <32 x double> @vpgather_baseidx_v32f64(double* %base, <32 x i64> %idxs, <
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v28, v8, zero
+; RV32-NEXT: vncvt.x.x.w v28, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vmv1r.v v0, v24
; RV32-NEXT: vluxei32.v v8, (a0), v28, v0.t
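A note on the RV32 changes in this file (and the files below): per the RISC-V Vector specification, vncvt.x.x.w vd, vs is an assembler alias for vnsrl.wx vd, vs, x0, so each old/new pair encodes the same instruction and only the printed mnemonic changes. A minimal sketch of the equivalence (register choices illustrative):

    # narrow each 2*SEW-bit element of v8 into an SEW-bit element of v12
    vnsrl.wx v12, v8, zero    # narrowing shift right by x0, i.e. by 0
    vncvt.x.x.w v12, v8       # spec-defined alias, identical encoding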
76 changes: 38 additions & 38 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
@@ -26,15 +26,15 @@ define void @vpscatter_v2i16_truncstore_v2i8(<2 x i16> %val, <2 x i8*> %ptrs, <2
; RV32-LABEL: vpscatter_v2i16_truncstore_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_v2i16_truncstore_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -47,19 +47,19 @@ define void @vpscatter_v2i32_truncstore_v2i8(<2 x i32> %val, <2 x i8*> %ptrs, <2
; RV32-LABEL: vpscatter_v2i32_truncstore_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_v2i32_truncstore_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -72,23 +72,23 @@ define void @vpscatter_v2i64_truncstore_v2i8(<2 x i64> %val, <2 x i8*> %ptrs, <2
; RV32-LABEL: vpscatter_v2i64_truncstore_v2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_v2i64_truncstore_v2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -194,15 +194,15 @@ define void @vpscatter_v2i32_truncstore_v2i16(<2 x i32> %val, <2 x i16*> %ptrs,
; RV32-LABEL: vpscatter_v2i32_truncstore_v2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_v2i32_truncstore_v2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -215,19 +215,19 @@ define void @vpscatter_v2i64_truncstore_v2i16(<2 x i64> %val, <2 x i16*> %ptrs,
; RV32-LABEL: vpscatter_v2i64_truncstore_v2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_v2i64_truncstore_v2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -442,15 +442,15 @@ define void @vpscatter_v2i64_truncstore_v2i32(<2 x i64> %val, <2 x i32*> %ptrs,
; RV32-LABEL: vpscatter_v2i64_truncstore_v2i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpscatter_v2i64_truncstore_v2i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV64-NEXT: vsoxei64.v v8, (zero), v9, v0.t
; RV64-NEXT: ret
@@ -779,7 +779,7 @@ define void @vpscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vsext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -805,7 +805,7 @@ define void @vpscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vzext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -854,7 +854,7 @@ define void @vpscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vsext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -880,7 +880,7 @@ define void @vpscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vzext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -928,7 +928,7 @@ define void @vpscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vsext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -954,7 +954,7 @@ define void @vpscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, i64* %base, <8 x
; RV32-NEXT: vzext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -979,7 +979,7 @@ define void @vpscatter_baseidx_v8i64(<8 x i64> %val, i64* %base, <8 x i64> %idxs
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v12, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1500,7 +1500,7 @@ define void @vpscatter_baseidx_sext_v8i8_v8f64(<8 x double> %val, double* %base,
; RV32-NEXT: vsext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1526,7 +1526,7 @@ define void @vpscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, double* %base,
; RV32-NEXT: vzext.vf8 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1575,7 +1575,7 @@ define void @vpscatter_baseidx_sext_v8i16_v8f64(<8 x double> %val, double* %base
; RV32-NEXT: vsext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1601,7 +1601,7 @@ define void @vpscatter_baseidx_zext_v8i16_v8f64(<8 x double> %val, double* %base
; RV32-NEXT: vzext.vf4 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1649,7 +1649,7 @@ define void @vpscatter_baseidx_sext_v8i32_v8f64(<8 x double> %val, double* %base
; RV32-NEXT: vsext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1675,7 +1675,7 @@ define void @vpscatter_baseidx_zext_v8i32_v8f64(<8 x double> %val, double* %base
; RV32-NEXT: vzext.vf2 v16, v12
; RV32-NEXT: vsll.vi v12, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1700,7 +1700,7 @@ define void @vpscatter_baseidx_v8f64(<8 x double> %val, double* %base, <8 x i64>
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vsll.vi v12, v12, 3
; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v12, zero
+; RV32-NEXT: vncvt.x.x.w v16, v12
; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
@@ -1924,7 +1924,7 @@ define void @vpscatter_baseidx_sext_v32i32_v32f64(<32 x double> %val, double* %b
; RV32-NEXT: vsext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: addi a1, a2, -16
; RV32-NEXT: addi a4, sp, 16
@@ -1937,7 +1937,7 @@ define void @vpscatter_baseidx_sext_v32i32_v32f64(<32 x double> %val, double* %b
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu
@@ -2058,7 +2058,7 @@ define void @vpscatter_baseidx_zext_v32i32_v32f64(<32 x double> %val, double* %b
; RV32-NEXT: vzext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v16, 3
; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: addi a1, a2, -16
; RV32-NEXT: addi a4, sp, 16
@@ -2071,7 +2071,7 @@ define void @vpscatter_baseidx_zext_v32i32_v32f64(<32 x double> %val, double* %b
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, a3, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v16, v8, zero
+; RV32-NEXT: vncvt.x.x.w v16, v8
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu
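The truncating-store cases above narrow in SEW-halving steps because one narrowing convert only goes from 2*SEW to SEW; an i64-to-i8 truncation therefore takes three converts, with a vsetvli between each to halve the element width. The chain, as in the tests:

    vsetivli zero, 2, e32, mf2, ta, mu
    vncvt.x.x.w v8, v8        # i64 -> i32
    vsetvli zero, zero, e16, mf4, ta, mu
    vncvt.x.x.w v8, v8        # i32 -> i16
    vsetvli zero, zero, e8, mf8, ta, mu
    vncvt.x.x.w v8, v8        # i16 -> i8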
34 changes: 17 additions & 17 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll
@@ -120,7 +120,7 @@ define <2 x i8> @vxor_vi_v2i8_unmasked_1(<2 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i8_unmasked_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
%vb = shufflevector <2 x i8> %elt.head, <2 x i8> poison, <2 x i32> zeroinitializer
@@ -222,7 +222,7 @@ define <4 x i8> @vxor_vi_v4i8_unmasked_1(<4 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i8_unmasked_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
%vb = shufflevector <4 x i8> %elt.head, <4 x i8> poison, <4 x i32> zeroinitializer
@@ -324,7 +324,7 @@ define <8 x i8> @vxor_vi_v8i8_unmasked_1(<8 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i8_unmasked_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
%vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer
@@ -426,7 +426,7 @@ define <9 x i8> @vxor_vi_v9i8_unmasked_1(<9 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v9i8_unmasked_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: ret
%elt.head = insertelement <9 x i8> poison, i8 -1, i32 0
%vb = shufflevector <9 x i8> %elt.head, <9 x i8> poison, <9 x i32> zeroinitializer
@@ -528,7 +528,7 @@ define <16 x i8> @vxor_vi_v16i8_unmasked_1(<16 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i8_unmasked_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
%vb = shufflevector <16 x i8> %elt.head, <16 x i8> poison, <16 x i32> zeroinitializer
@@ -630,7 +630,7 @@ define <2 x i16> @vxor_vi_v2i16_unmasked_1(<2 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i16_unmasked_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
%vb = shufflevector <2 x i16> %elt.head, <2 x i16> poison, <2 x i32> zeroinitializer
@@ -732,7 +732,7 @@ define <4 x i16> @vxor_vi_v4i16_unmasked_1(<4 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i16_unmasked_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
%vb = shufflevector <4 x i16> %elt.head, <4 x i16> poison, <4 x i32> zeroinitializer
@@ -834,7 +834,7 @@ define <8 x i16> @vxor_vi_v8i16_unmasked_1(<8 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i16_unmasked_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
%vb = shufflevector <8 x i16> %elt.head, <8 x i16> poison, <8 x i32> zeroinitializer
@@ -936,7 +936,7 @@ define <16 x i16> @vxor_vi_v16i16_unmasked_1(<16 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i16_unmasked_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
%vb = shufflevector <16 x i16> %elt.head, <16 x i16> poison, <16 x i32> zeroinitializer
@@ -1038,7 +1038,7 @@ define <2 x i32> @vxor_vi_v2i32_unmasked_1(<2 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i32_unmasked_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
%vb = shufflevector <2 x i32> %elt.head, <2 x i32> poison, <2 x i32> zeroinitializer
@@ -1140,7 +1140,7 @@ define <4 x i32> @vxor_vi_v4i32_unmasked_1(<4 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i32_unmasked_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
%vb = shufflevector <4 x i32> %elt.head, <4 x i32> poison, <4 x i32> zeroinitializer
@@ -1242,7 +1242,7 @@ define <8 x i32> @vxor_vi_v8i32_unmasked_1(<8 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i32_unmasked_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
%vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer
@@ -1344,7 +1344,7 @@ define <16 x i32> @vxor_vi_v16i32_unmasked_1(<16 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i32_unmasked_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
%vb = shufflevector <16 x i32> %elt.head, <16 x i32> poison, <16 x i32> zeroinitializer
@@ -1474,7 +1474,7 @@ define <2 x i64> @vxor_vi_v2i64_unmasked_1(<2 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i64_unmasked_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
%vb = shufflevector <2 x i64> %elt.head, <2 x i64> poison, <2 x i32> zeroinitializer
@@ -1604,7 +1604,7 @@ define <4 x i64> @vxor_vi_v4i64_unmasked_1(<4 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i64_unmasked_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
%vb = shufflevector <4 x i64> %elt.head, <4 x i64> poison, <4 x i32> zeroinitializer
@@ -1734,7 +1734,7 @@ define <8 x i64> @vxor_vi_v8i64_unmasked_1(<8 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i64_unmasked_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
%vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer
@@ -1864,7 +1864,7 @@ define <16 x i64> @vxor_vi_v16i64_unmasked_1(<16 x i64> %va, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i64_unmasked_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
-; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: vnot.v v8, v8
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
%vb = shufflevector <16 x i64> %elt.head, <16 x i64> poison, <16 x i32> zeroinitializer
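The same alias mechanism applies here: XOR with an all-ones immediate is a bitwise complement, and the specification defines vnot.v vd, vs as an alias for vxor.vi vd, vs, -1:

    vxor.vi v8, v8, -1        # x ^ 0b111...1 == ~x
    vnot.v v8, v8             # alias, identical encoding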
30 changes: 15 additions & 15 deletions llvm/test/CodeGen/RISCV/rvv/fround-sdnode.ll
@@ -12,7 +12,7 @@ define <vscale x 1 x half> @round_nxv1f16(<vscale x 1 x half> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI0_1)
; CHECK-NEXT: flh ft1, %lo(.LCPI0_1)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfadd.vf v9, v9, ft1
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9
@@ -33,7 +33,7 @@ define <vscale x 2 x half> @round_nxv2f16(<vscale x 2 x half> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI1_1)
; CHECK-NEXT: flh ft1, %lo(.LCPI1_1)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfadd.vf v9, v9, ft1
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9
@@ -54,7 +54,7 @@ define <vscale x 4 x half> @round_nxv4f16(<vscale x 4 x half> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI2_1)
; CHECK-NEXT: flh ft1, %lo(.LCPI2_1)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfadd.vf v9, v9, ft1
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9
@@ -75,7 +75,7 @@ define <vscale x 8 x half> @round_nxv8f16(<vscale x 8 x half> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI3_1)
; CHECK-NEXT: flh ft1, %lo(.LCPI3_1)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
; CHECK-NEXT: vmflt.vf v0, v10, ft0
; CHECK-NEXT: vfadd.vf v10, v10, ft1
; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v10
@@ -96,7 +96,7 @@ define <vscale x 16 x half> @round_nxv16f16(<vscale x 16 x half> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI4_1)
; CHECK-NEXT: flh ft1, %lo(.LCPI4_1)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT: vfabs.v v12, v8
; CHECK-NEXT: vmflt.vf v0, v12, ft0
; CHECK-NEXT: vfadd.vf v12, v12, ft1
; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v12
@@ -117,7 +117,7 @@ define <vscale x 32 x half> @round_nxv32f16(<vscale x 32 x half> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI5_1)
; CHECK-NEXT: flh ft1, %lo(.LCPI5_1)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT: vfabs.v v16, v8
; CHECK-NEXT: vmflt.vf v0, v16, ft0
; CHECK-NEXT: vfadd.vf v16, v16, ft1
; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16
@@ -138,7 +138,7 @@ define <vscale x 1 x float> @round_nxv1f32(<vscale x 1 x float> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI6_1)
; CHECK-NEXT: flw ft1, %lo(.LCPI6_1)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfadd.vf v9, v9, ft1
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9
@@ -159,7 +159,7 @@ define <vscale x 2 x float> @round_nxv2f32(<vscale x 2 x float> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI7_1)
; CHECK-NEXT: flw ft1, %lo(.LCPI7_1)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfadd.vf v9, v9, ft1
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9
@@ -180,7 +180,7 @@ define <vscale x 4 x float> @round_nxv4f32(<vscale x 4 x float> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI8_1)
; CHECK-NEXT: flw ft1, %lo(.LCPI8_1)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
; CHECK-NEXT: vmflt.vf v0, v10, ft0
; CHECK-NEXT: vfadd.vf v10, v10, ft1
; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v10
@@ -201,7 +201,7 @@ define <vscale x 8 x float> @round_nxv8f32(<vscale x 8 x float> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI9_1)
; CHECK-NEXT: flw ft1, %lo(.LCPI9_1)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT: vfabs.v v12, v8
; CHECK-NEXT: vmflt.vf v0, v12, ft0
; CHECK-NEXT: vfadd.vf v12, v12, ft1
; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v12
@@ -222,7 +222,7 @@ define <vscale x 16 x float> @round_nxv16f32(<vscale x 16 x float> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI10_1)
; CHECK-NEXT: flw ft1, %lo(.LCPI10_1)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT: vfabs.v v16, v8
; CHECK-NEXT: vmflt.vf v0, v16, ft0
; CHECK-NEXT: vfadd.vf v16, v16, ft1
; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16
@@ -243,7 +243,7 @@ define <vscale x 1 x double> @round_nxv1f64(<vscale x 1 x double> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI11_1)
; CHECK-NEXT: fld ft1, %lo(.LCPI11_1)(a0)
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfadd.vf v9, v9, ft1
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v9
@@ -264,7 +264,7 @@ define <vscale x 2 x double> @round_nxv2f64(<vscale x 2 x double> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI12_1)
; CHECK-NEXT: fld ft1, %lo(.LCPI12_1)(a0)
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
; CHECK-NEXT: vmflt.vf v0, v10, ft0
; CHECK-NEXT: vfadd.vf v10, v10, ft1
; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v10
@@ -285,7 +285,7 @@ define <vscale x 4 x double> @round_nxv4f64(<vscale x 4 x double> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI13_1)
; CHECK-NEXT: fld ft1, %lo(.LCPI13_1)(a0)
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT: vfabs.v v12, v8
; CHECK-NEXT: vmflt.vf v0, v12, ft0
; CHECK-NEXT: vfadd.vf v12, v12, ft1
; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v12
@@ -306,7 +306,7 @@ define <vscale x 8 x double> @round_nxv8f64(<vscale x 8 x double> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI14_1)
; CHECK-NEXT: fld ft1, %lo(.LCPI14_1)(a0)
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT: vfabs.v v16, v8
; CHECK-NEXT: vmflt.vf v0, v16, ft0
; CHECK-NEXT: vfadd.vf v16, v16, ft1
; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16
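Two details in these round() lowerings. First, vfsgnjx.vv vd, vs, vs XORs each element's sign bit with itself, clearing it, so vfabs.v vd, vs is the spec-defined alias the printer now uses. Second, the surrounding code is the usual magnitude guard: only lanes with |x| below 2^(mantissa bits) (the .LCPI*_0 constant) need rounding, since larger values are already integral. A commented sketch of one case:

    vfabs.v v9, v8            # |x| (alias of vfsgnjx.vv v9, v8, v8)
    vmflt.vf v0, v9, ft0      # mask: |x| < 2^p still has a fractional part
    vfadd.vf v9, v9, ft1      # add 0.5 to the magnitude (.LCPI*_1)
    vfcvt.rtz.x.f.v v9, v9    # truncate: net effect rounds half away from zero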
30 changes: 15 additions & 15 deletions llvm/test/CodeGen/RISCV/rvv/ftrunc-sdnode.ll
@@ -10,7 +10,7 @@ define <vscale x 1 x half> @trunc_nxv1f16(<vscale x 1 x half> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI0_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI0_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8
; CHECK-NEXT: vfcvt.f.x.v v9, v9
@@ -28,7 +28,7 @@ define <vscale x 2 x half> @trunc_nxv2f16(<vscale x 2 x half> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI1_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8
; CHECK-NEXT: vfcvt.f.x.v v9, v9
@@ -46,7 +46,7 @@ define <vscale x 4 x half> @trunc_nxv4f16(<vscale x 4 x half> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI2_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI2_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8
; CHECK-NEXT: vfcvt.f.x.v v9, v9
@@ -64,7 +64,7 @@ define <vscale x 8 x half> @trunc_nxv8f16(<vscale x 8 x half> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI3_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
; CHECK-NEXT: vmflt.vf v0, v10, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8
; CHECK-NEXT: vfcvt.f.x.v v10, v10
@@ -82,7 +82,7 @@ define <vscale x 16 x half> @trunc_nxv16f16(<vscale x 16 x half> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI4_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI4_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT: vfabs.v v12, v8
; CHECK-NEXT: vmflt.vf v0, v12, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8
; CHECK-NEXT: vfcvt.f.x.v v12, v12
@@ -100,7 +100,7 @@ define <vscale x 32 x half> @trunc_nxv32f16(<vscale x 32 x half> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT: vfabs.v v16, v8
; CHECK-NEXT: vmflt.vf v0, v16, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8
; CHECK-NEXT: vfcvt.f.x.v v16, v16
@@ -118,7 +118,7 @@ define <vscale x 1 x float> @trunc_nxv1f32(<vscale x 1 x float> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI6_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8
; CHECK-NEXT: vfcvt.f.x.v v9, v9
@@ -136,7 +136,7 @@ define <vscale x 2 x float> @trunc_nxv2f32(<vscale x 2 x float> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI7_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI7_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8
; CHECK-NEXT: vfcvt.f.x.v v9, v9
@@ -154,7 +154,7 @@ define <vscale x 4 x float> @trunc_nxv4f32(<vscale x 4 x float> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI8_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI8_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
; CHECK-NEXT: vmflt.vf v0, v10, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8
; CHECK-NEXT: vfcvt.f.x.v v10, v10
@@ -172,7 +172,7 @@ define <vscale x 8 x float> @trunc_nxv8f32(<vscale x 8 x float> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI9_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT: vfabs.v v12, v8
; CHECK-NEXT: vmflt.vf v0, v12, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8
; CHECK-NEXT: vfcvt.f.x.v v12, v12
@@ -190,7 +190,7 @@ define <vscale x 16 x float> @trunc_nxv16f32(<vscale x 16 x float> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI10_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI10_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT: vfabs.v v16, v8
; CHECK-NEXT: vmflt.vf v0, v16, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8
; CHECK-NEXT: vfcvt.f.x.v v16, v16
@@ -208,7 +208,7 @@ define <vscale x 1 x double> @trunc_nxv1f64(<vscale x 1 x double> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI11_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI11_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v9, v8, v8
+; CHECK-NEXT: vfabs.v v9, v8
; CHECK-NEXT: vmflt.vf v0, v9, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v9, v8
; CHECK-NEXT: vfcvt.f.x.v v9, v9
@@ -226,7 +226,7 @@ define <vscale x 2 x double> @trunc_nxv2f64(<vscale x 2 x double> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI12_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI12_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v10, v8, v8
+; CHECK-NEXT: vfabs.v v10, v8
; CHECK-NEXT: vmflt.vf v0, v10, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v10, v8
; CHECK-NEXT: vfcvt.f.x.v v10, v10
@@ -244,7 +244,7 @@ define <vscale x 4 x double> @trunc_nxv4f64(<vscale x 4 x double> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI13_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI13_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v12, v8, v8
+; CHECK-NEXT: vfabs.v v12, v8
; CHECK-NEXT: vmflt.vf v0, v12, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v12, v8
; CHECK-NEXT: vfcvt.f.x.v v12, v12
@@ -262,7 +262,7 @@ define <vscale x 8 x double> @trunc_nxv8f64(<vscale x 8 x double> %x) {
; CHECK-NEXT: lui a0, %hi(.LCPI14_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI14_0)(a0)
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnjx.vv v16, v8, v8
+; CHECK-NEXT: vfabs.v v16, v8
; CHECK-NEXT: vmflt.vf v0, v16, ft0
; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v8
; CHECK-NEXT: vfcvt.f.x.v v16, v16
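The trunc() lowering uses the same guard but skips the 0.5 addition, converting the guarded lanes straight to integer with round-toward-zero and back:

    vfabs.v v9, v8            # magnitude
    vmflt.vf v0, v9, ft0      # only |x| < 2^p can have a fractional part
    vfcvt.rtz.x.f.v v9, v8    # to integer, truncating toward zero
    vfcvt.f.x.v v9, v9        # back to floating point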
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/RISCV/rvv/legalize-scalable-vectortype.ll
@@ -6,9 +6,9 @@ define <vscale x 4 x i5> @trunc_nxv4i32_to_nxv4i5(<vscale x 4 x i32> %a) {
; CHECK-LABEL: trunc_nxv4i32_to_nxv4i5:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
-; CHECK-NEXT: vnsrl.wx v10, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v10, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v10
; CHECK-NEXT: ret
%v = trunc <vscale x 4 x i32> %a to <vscale x 4 x i5>
ret <vscale x 4 x i5> %v
@@ -18,9 +18,9 @@ define <vscale x 1 x i5> @trunc_nxv1i32_to_nxv1i5(<vscale x 1 x i32> %a) {
; CHECK-LABEL: trunc_nxv1i32_to_nxv1i5:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vnsrl.wx v8, v8, zero
+; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: ret
%v = trunc <vscale x 1 x i32> %a to <vscale x 1 x i5>
ret <vscale x 1 x i5> %v
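Context for these tests: i5 is not a legal element type, so the result is promoted to i8 and the i32 source is narrowed in two SEW-halving steps; bits of the i5 value above bit 4 are never observed, so plain truncating narrows suffice:

    vsetvli a0, zero, e16, mf4, ta, mu
    vncvt.x.x.w v8, v8        # i32 -> i16
    vsetvli zero, zero, e8, mf8, ta, mu
    vncvt.x.x.w v8, v8        # i16 -> i8, low 5 bits hold the i5 result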
28 changes: 14 additions & 14 deletions llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -1038,7 +1038,7 @@ define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i8_nxv8i64(i64* %base, <vsca
; RV32-NEXT: vsext.vf8 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1065,7 +1065,7 @@ define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i8_nxv8i64(i64* %base, <vsca
; RV32-NEXT: vzext.vf8 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1116,7 +1116,7 @@ define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i16_nxv8i64(i64* %base, <vsc
; RV32-NEXT: vsext.vf4 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1143,7 +1143,7 @@ define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i16_nxv8i64(i64* %base, <vsc
; RV32-NEXT: vzext.vf4 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1193,7 +1193,7 @@ define <vscale x 8 x i64> @mgather_baseidx_sext_nxv8i32_nxv8i64(i64* %base, <vsc
; RV32-NEXT: vsext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1220,7 +1220,7 @@ define <vscale x 8 x i64> @mgather_baseidx_zext_nxv8i32_nxv8i64(i64* %base, <vsc
; RV32-NEXT: vzext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1246,7 +1246,7 @@ define <vscale x 8 x i64> @mgather_baseidx_nxv8i64(i64* %base, <vscale x 8 x i64
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1969,7 +1969,7 @@ define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i8_nxv8f64(double* %base,
; RV32-NEXT: vsext.vf8 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -1996,7 +1996,7 @@ define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i8_nxv8f64(double* %base,
; RV32-NEXT: vzext.vf8 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -2047,7 +2047,7 @@ define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i16_nxv8f64(double* %base
; RV32-NEXT: vsext.vf4 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -2074,7 +2074,7 @@ define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i16_nxv8f64(double* %base
; RV32-NEXT: vzext.vf4 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -2124,7 +2124,7 @@ define <vscale x 8 x double> @mgather_baseidx_sext_nxv8i32_nxv8f64(double* %base
; RV32-NEXT: vsext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -2151,7 +2151,7 @@ define <vscale x 8 x double> @mgather_baseidx_zext_nxv8i32_nxv8f64(double* %base
; RV32-NEXT: vzext.vf2 v24, v8
; RV32-NEXT: vsll.vi v8, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
@@ -2177,7 +2177,7 @@ define <vscale x 8 x double> @mgather_baseidx_nxv8f64(double* %base, <vscale x 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v8, v8, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; RV32-NEXT: vnsrl.wx v24, v8, zero
+; RV32-NEXT: vncvt.x.x.w v24, v8
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv.v.v v8, v16
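In the RV32 output above, the 64-bit scaled offsets are narrowed to 32 bits before the gather: RV32 addresses are 32-bit, so vluxei32.v with 32-bit indices covers the full address space. The index pipeline, as in the tests:

    vsll.vi v8, v24, 3        # offset = index * 8 (i64/f64 elements), at e64
    vsetvli zero, zero, e32, m4, ta, mu
    vncvt.x.x.w v24, v8       # keep the low 32 bits of each offset
    vsetvli zero, zero, e64, m8, ta, mu
    vluxei32.v v16, (a0), v24, v0.t   # indexed load with 32-bit offsets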
68 changes: 34 additions & 34 deletions llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
@@ -44,14 +44,14 @@ define void @mscatter_nxv2i16_truncstore_nxv2i8(<vscale x 2 x i16> %val, <vscale
; RV32-LABEL: mscatter_nxv2i16_truncstore_nxv2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i16_truncstore_nxv2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i16> %val to <vscale x 2 x i8>
@@ -63,18 +63,18 @@ define void @mscatter_nxv2i32_truncstore_nxv2i8(<vscale x 2 x i32> %val, <vscale
; RV32-LABEL: mscatter_nxv2i32_truncstore_nxv2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i32> %val to <vscale x 2 x i8>
@@ -86,22 +86,22 @@ define void @mscatter_nxv2i64_truncstore_nxv2i8(<vscale x 2 x i64> %val, <vscale
; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i8:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; RV32-NEXT: vnsrl.wx v11, v8, zero
+; RV32-NEXT: vncvt.x.x.w v11, v8
; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v11, zero
+; RV32-NEXT: vncvt.x.x.w v8, v11
; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i8:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; RV64-NEXT: vnsrl.wx v12, v8, zero
+; RV64-NEXT: vncvt.x.x.w v12, v8
; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v12, zero
+; RV64-NEXT: vncvt.x.x.w v8, v12
; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i8>
@@ -232,14 +232,14 @@ define void @mscatter_nxv2i32_truncstore_nxv2i16(<vscale x 2 x i32> %val, <vscal
; RV32-LABEL: mscatter_nxv2i32_truncstore_nxv2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v8, zero
+; RV32-NEXT: vncvt.x.x.w v8, v8
; RV32-NEXT: vsoxei32.v v8, (zero), v9, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i32_truncstore_nxv2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v8, zero
+; RV64-NEXT: vncvt.x.x.w v8, v8
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i32> %val to <vscale x 2 x i16>
@@ -251,18 +251,18 @@ define void @mscatter_nxv2i64_truncstore_nxv2i16(<vscale x 2 x i64> %val, <vscal
; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i16:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; RV32-NEXT: vnsrl.wx v11, v8, zero
+; RV32-NEXT: vncvt.x.x.w v11, v8
; RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV32-NEXT: vnsrl.wx v8, v11, zero
+; RV32-NEXT: vncvt.x.x.w v8, v11
; RV32-NEXT: vsoxei32.v v8, (zero), v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i16:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; RV64-NEXT: vnsrl.wx v12, v8, zero
+; RV64-NEXT: vncvt.x.x.w v12, v8
; RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
-; RV64-NEXT: vnsrl.wx v8, v12, zero
+; RV64-NEXT: vncvt.x.x.w v8, v12
; RV64-NEXT: vsoxei64.v v8, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i16>
@@ -466,14 +466,14 @@ define void @mscatter_nxv2i64_truncstore_nxv2i32(<vscale x 2 x i64> %val, <vscal
; RV32-LABEL: mscatter_nxv2i64_truncstore_nxv2i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, mu
-; RV32-NEXT: vnsrl.wx v11, v8, zero
+; RV32-NEXT: vncvt.x.x.w v11, v8
; RV32-NEXT: vsoxei32.v v11, (zero), v10, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: mscatter_nxv2i64_truncstore_nxv2i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; RV64-NEXT: vnsrl.wx v12, v8, zero
; RV64-NEXT: vncvt.x.x.w v12, v8
; RV64-NEXT: vsoxei64.v v12, (zero), v10, v0.t
; RV64-NEXT: ret
%tval = trunc <vscale x 2 x i64> %val to <vscale x 2 x i32>
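
Unlike the i64-to-i8 chains, this i64-to-i32 truncation narrows in one step, since a narrowing instruction halves the element width exactly once (source EEW = 2*SEW). A hypothetical reduced input that should reproduce the single `vncvt.x.x.w` (function name illustrative; compiled with e.g. `llc -mtriple=riscv64 -mattr=+v`):

    define <vscale x 2 x i32> @trunc_i64_to_i32(<vscale x 2 x i64> %v) {
      %t = trunc <vscale x 2 x i64> %v to <vscale x 2 x i32>
      ret <vscale x 2 x i32> %t
    }
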
@@ -827,7 +827,7 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, i64*
; RV32-NEXT: vsext.vf8 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vncvt.x.x.w v24, v16
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -852,7 +852,7 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8i64(<vscale x 8 x i64> %val, i64*
; RV32-NEXT: vzext.vf8 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vncvt.x.x.w v24, v16
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -899,7 +899,7 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, i64*
; RV32-NEXT: vsext.vf4 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vncvt.x.x.w v24, v16
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -924,7 +924,7 @@ define void @mscatter_baseidx_zext_nxv8i16_nxv8i64(<vscale x 8 x i64> %val, i64*
; RV32-NEXT: vzext.vf4 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vncvt.x.x.w v24, v16
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -970,7 +970,7 @@ define void @mscatter_baseidx_sext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, i64*
; RV32-NEXT: vsext.vf2 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vncvt.x.x.w v24, v16
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -995,7 +995,7 @@ define void @mscatter_baseidx_zext_nxv8i32_nxv8i64(<vscale x 8 x i64> %val, i64*
; RV32-NEXT: vzext.vf2 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vncvt.x.x.w v24, v16
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
Expand All @@ -1019,7 +1019,7 @@ define void @mscatter_baseidx_nxv8i64(<vscale x 8 x i64> %val, i64* %base, <vsca
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vncvt.x.x.w v24, v16
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
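
The RV32 pattern above is the same in every baseidx test: the i64 indices are scaled to byte offsets with `vsll.vi ..., 3` at e64, but a 32-bit address can only be affected by the low 32 bits of an offset, so the offsets are narrowed to e32 with `vncvt.x.x.w` and the scatter uses `vsoxei32.v` (indexed store with EEW=32 offsets), while RV64 keeps 64-bit offsets and uses `vsoxei64.v`. Annotated sketch (registers mirror the checks):

    vsll.vi v16, v16, 3                  ; byte offset = index * 8 (e64)
    vsetvli zero, zero, e32, m4, ta, mu
    vncvt.x.x.w v24, v16                 ; keep the low 32 bits of each offset
    vsetvli zero, zero, e64, m8, ta, mu
    vsoxei32.v v8, (a0), v24, v0.t       ; store e64 data at base + 32-bit offsets
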
@@ -1609,7 +1609,7 @@ define void @mscatter_baseidx_sext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, do
; RV32-NEXT: vsext.vf8 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vncvt.x.x.w v24, v16
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1634,7 +1634,7 @@ define void @mscatter_baseidx_zext_nxv8i8_nxv8f64(<vscale x 8 x double> %val, do
; RV32-NEXT: vzext.vf8 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vncvt.x.x.w v24, v16
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1681,7 +1681,7 @@ define void @mscatter_baseidx_sext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, d
; RV32-NEXT: vsext.vf4 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vncvt.x.x.w v24, v16
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1706,7 +1706,7 @@ define void @mscatter_baseidx_zext_nxv8i16_nxv8f64(<vscale x 8 x double> %val, d
; RV32-NEXT: vzext.vf4 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vncvt.x.x.w v24, v16
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1752,7 +1752,7 @@ define void @mscatter_baseidx_sext_nxv8i32_nxv8f64(<vscale x 8 x double> %val, d
; RV32-NEXT: vsext.vf2 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vncvt.x.x.w v24, v16
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1777,7 +1777,7 @@ define void @mscatter_baseidx_zext_nxv8i32_nxv8f64(<vscale x 8 x double> %val, d
; RV32-NEXT: vzext.vf2 v24, v16
; RV32-NEXT: vsll.vi v16, v24, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vncvt.x.x.w v24, v16
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
Expand All @@ -1801,7 +1801,7 @@ define void @mscatter_baseidx_nxv8f64(<vscale x 8 x double> %val, double* %base,
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vsll.vi v16, v16, 3
; RV32-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; RV32-NEXT: vnsrl.wx v24, v16, zero
; RV32-NEXT: vncvt.x.x.w v24, v16
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
30 changes: 15 additions & 15 deletions llvm/test/CodeGen/RISCV/rvv/vfabs-sdnode.ll
@@ -10,7 +10,7 @@ define <vscale x 1 x half> @vfabs_nxv1f16(<vscale x 1 x half> %v) {
; CHECK-LABEL: vfabs_nxv1f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
; CHECK-NEXT: vfabs.v v8, v8
; CHECK-NEXT: ret
%r = call <vscale x 1 x half> @llvm.fabs.nxv1f16(<vscale x 1 x half> %v)
ret <vscale x 1 x half> %r
@@ -22,7 +22,7 @@ define <vscale x 2 x half> @vfabs_nxv2f16(<vscale x 2 x half> %v) {
; CHECK-LABEL: vfabs_nxv2f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
; CHECK-NEXT: vfabs.v v8, v8
; CHECK-NEXT: ret
%r = call <vscale x 2 x half> @llvm.fabs.nxv2f16(<vscale x 2 x half> %v)
ret <vscale x 2 x half> %r
@@ -34,7 +34,7 @@ define <vscale x 4 x half> @vfabs_nxv4f16(<vscale x 4 x half> %v) {
; CHECK-LABEL: vfabs_nxv4f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
; CHECK-NEXT: vfabs.v v8, v8
; CHECK-NEXT: ret
%r = call <vscale x 4 x half> @llvm.fabs.nxv4f16(<vscale x 4 x half> %v)
ret <vscale x 4 x half> %r
@@ -46,7 +46,7 @@ define <vscale x 8 x half> @vfabs_nxv8f16(<vscale x 8 x half> %v) {
; CHECK-LABEL: vfabs_nxv8f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
; CHECK-NEXT: vfabs.v v8, v8
; CHECK-NEXT: ret
%r = call <vscale x 8 x half> @llvm.fabs.nxv8f16(<vscale x 8 x half> %v)
ret <vscale x 8 x half> %r
@@ -58,7 +58,7 @@ define <vscale x 16 x half> @vfabs_nxv16f16(<vscale x 16 x half> %v) {
; CHECK-LABEL: vfabs_nxv16f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
; CHECK-NEXT: vfabs.v v8, v8
; CHECK-NEXT: ret
%r = call <vscale x 16 x half> @llvm.fabs.nxv16f16(<vscale x 16 x half> %v)
ret <vscale x 16 x half> %r
@@ -70,7 +70,7 @@ define <vscale x 32 x half> @vfabs_nxv32f16(<vscale x 32 x half> %v) {
; CHECK-LABEL: vfabs_nxv32f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
; CHECK-NEXT: vfabs.v v8, v8
; CHECK-NEXT: ret
%r = call <vscale x 32 x half> @llvm.fabs.nxv32f16(<vscale x 32 x half> %v)
ret <vscale x 32 x half> %r
@@ -82,7 +82,7 @@ define <vscale x 1 x float> @vfabs_nxv1f32(<vscale x 1 x float> %v) {
; CHECK-LABEL: vfabs_nxv1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
; CHECK-NEXT: vfabs.v v8, v8
; CHECK-NEXT: ret
%r = call <vscale x 1 x float> @llvm.fabs.nxv1f32(<vscale x 1 x float> %v)
ret <vscale x 1 x float> %r
@@ -94,7 +94,7 @@ define <vscale x 2 x float> @vfabs_nxv2f32(<vscale x 2 x float> %v) {
; CHECK-LABEL: vfabs_nxv2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
; CHECK-NEXT: vfabs.v v8, v8
; CHECK-NEXT: ret
%r = call <vscale x 2 x float> @llvm.fabs.nxv2f32(<vscale x 2 x float> %v)
ret <vscale x 2 x float> %r
@@ -106,7 +106,7 @@ define <vscale x 4 x float> @vfabs_nxv4f32(<vscale x 4 x float> %v) {
; CHECK-LABEL: vfabs_nxv4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
; CHECK-NEXT: vfabs.v v8, v8
; CHECK-NEXT: ret
%r = call <vscale x 4 x float> @llvm.fabs.nxv4f32(<vscale x 4 x float> %v)
ret <vscale x 4 x float> %r
@@ -118,7 +118,7 @@ define <vscale x 8 x float> @vfabs_nxv8f32(<vscale x 8 x float> %v) {
; CHECK-LABEL: vfabs_nxv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
; CHECK-NEXT: vfabs.v v8, v8
; CHECK-NEXT: ret
%r = call <vscale x 8 x float> @llvm.fabs.nxv8f32(<vscale x 8 x float> %v)
ret <vscale x 8 x float> %r
@@ -130,7 +130,7 @@ define <vscale x 16 x float> @vfabs_nxv16f32(<vscale x 16 x float> %v) {
; CHECK-LABEL: vfabs_nxv16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
; CHECK-NEXT: vfabs.v v8, v8
; CHECK-NEXT: ret
%r = call <vscale x 16 x float> @llvm.fabs.nxv16f32(<vscale x 16 x float> %v)
ret <vscale x 16 x float> %r
@@ -142,7 +142,7 @@ define <vscale x 1 x double> @vfabs_nxv1f64(<vscale x 1 x double> %v) {
; CHECK-LABEL: vfabs_nxv1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
; CHECK-NEXT: vfabs.v v8, v8
; CHECK-NEXT: ret
%r = call <vscale x 1 x double> @llvm.fabs.nxv1f64(<vscale x 1 x double> %v)
ret <vscale x 1 x double> %r
@@ -154,7 +154,7 @@ define <vscale x 2 x double> @vfabs_nxv2f64(<vscale x 2 x double> %v) {
; CHECK-LABEL: vfabs_nxv2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
; CHECK-NEXT: vfabs.v v8, v8
; CHECK-NEXT: ret
%r = call <vscale x 2 x double> @llvm.fabs.nxv2f64(<vscale x 2 x double> %v)
ret <vscale x 2 x double> %r
@@ -166,7 +166,7 @@ define <vscale x 4 x double> @vfabs_nxv4f64(<vscale x 4 x double> %v) {
; CHECK-LABEL: vfabs_nxv4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
; CHECK-NEXT: vfabs.v v8, v8
; CHECK-NEXT: ret
%r = call <vscale x 4 x double> @llvm.fabs.nxv4f64(<vscale x 4 x double> %v)
ret <vscale x 4 x double> %r
@@ -178,7 +178,7 @@ define <vscale x 8 x double> @vfabs_nxv8f64(<vscale x 8 x double> %v) {
; CHECK-LABEL: vfabs_nxv8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnjx.vv v8, v8, v8
; CHECK-NEXT: vfabs.v v8, v8
; CHECK-NEXT: ret
%r = call <vscale x 8 x double> @llvm.fabs.nxv8f64(<vscale x 8 x double> %v)
ret <vscale x 8 x double> %r
30 changes: 15 additions & 15 deletions llvm/test/CodeGen/RISCV/rvv/vfneg-sdnode.ll
@@ -8,7 +8,7 @@ define <vscale x 1 x half> @vfneg_vv_nxv1f16(<vscale x 1 x half> %va) {
; CHECK-LABEL: vfneg_vv_nxv1f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 1 x half> %va
ret <vscale x 1 x half> %vb
@@ -18,7 +18,7 @@ define <vscale x 2 x half> @vfneg_vv_nxv2f16(<vscale x 2 x half> %va) {
; CHECK-LABEL: vfneg_vv_nxv2f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 2 x half> %va
ret <vscale x 2 x half> %vb
@@ -28,7 +28,7 @@ define <vscale x 4 x half> @vfneg_vv_nxv4f16(<vscale x 4 x half> %va) {
; CHECK-LABEL: vfneg_vv_nxv4f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 4 x half> %va
ret <vscale x 4 x half> %vb
@@ -38,7 +38,7 @@ define <vscale x 8 x half> @vfneg_vv_nxv8f16(<vscale x 8 x half> %va) {
; CHECK-LABEL: vfneg_vv_nxv8f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 8 x half> %va
ret <vscale x 8 x half> %vb
@@ -48,7 +48,7 @@ define <vscale x 16 x half> @vfneg_vv_nxv16f16(<vscale x 16 x half> %va) {
; CHECK-LABEL: vfneg_vv_nxv16f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 16 x half> %va
ret <vscale x 16 x half> %vb
@@ -58,7 +58,7 @@ define <vscale x 32 x half> @vfneg_vv_nxv32f16(<vscale x 32 x half> %va) {
; CHECK-LABEL: vfneg_vv_nxv32f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 32 x half> %va
ret <vscale x 32 x half> %vb
@@ -68,7 +68,7 @@ define <vscale x 1 x float> @vfneg_vv_nxv1f32(<vscale x 1 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 1 x float> %va
ret <vscale x 1 x float> %vb
@@ -78,7 +78,7 @@ define <vscale x 2 x float> @vfneg_vv_nxv2f32(<vscale x 2 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 2 x float> %va
ret <vscale x 2 x float> %vb
@@ -88,7 +88,7 @@ define <vscale x 4 x float> @vfneg_vv_nxv4f32(<vscale x 4 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 4 x float> %va
ret <vscale x 4 x float> %vb
@@ -98,7 +98,7 @@ define <vscale x 8 x float> @vfneg_vv_nxv8f32(<vscale x 8 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 8 x float> %va
ret <vscale x 8 x float> %vb
@@ -108,7 +108,7 @@ define <vscale x 16 x float> @vfneg_vv_nxv16f32(<vscale x 16 x float> %va) {
; CHECK-LABEL: vfneg_vv_nxv16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 16 x float> %va
ret <vscale x 16 x float> %vb
@@ -118,7 +118,7 @@ define <vscale x 1 x double> @vfneg_vv_nxv1f64(<vscale x 1 x double> %va) {
; CHECK-LABEL: vfneg_vv_nxv1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 1 x double> %va
ret <vscale x 1 x double> %vb
@@ -128,7 +128,7 @@ define <vscale x 2 x double> @vfneg_vv_nxv2f64(<vscale x 2 x double> %va) {
; CHECK-LABEL: vfneg_vv_nxv2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 2 x double> %va
ret <vscale x 2 x double> %vb
@@ -138,7 +138,7 @@ define <vscale x 4 x double> @vfneg_vv_nxv4f64(<vscale x 4 x double> %va) {
; CHECK-LABEL: vfneg_vv_nxv4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 4 x double> %va
ret <vscale x 4 x double> %vb
@@ -148,7 +148,7 @@ define <vscale x 8 x double> @vfneg_vv_nxv8f64(<vscale x 8 x double> %va) {
; CHECK-LABEL: vfneg_vv_nxv8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%vb = fneg <vscale x 8 x double> %va
ret <vscale x 8 x double> %vb
36 changes: 18 additions & 18 deletions llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
@@ -20,7 +20,7 @@ define <vscale x 1 x half> @vfneg_vv_nxv1f16_unmasked(<vscale x 1 x half> %va, i
; CHECK-LABEL: vfneg_vv_nxv1f16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
%m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
@@ -44,7 +44,7 @@ define <vscale x 2 x half> @vfneg_vv_nxv2f16_unmasked(<vscale x 2 x half> %va, i
; CHECK-LABEL: vfneg_vv_nxv2f16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
%m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
@@ -68,7 +68,7 @@ define <vscale x 4 x half> @vfneg_vv_nxv4f16_unmasked(<vscale x 4 x half> %va, i
; CHECK-LABEL: vfneg_vv_nxv4f16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
%m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
@@ -92,7 +92,7 @@ define <vscale x 8 x half> @vfneg_vv_nxv8f16_unmasked(<vscale x 8 x half> %va, i
; CHECK-LABEL: vfneg_vv_nxv8f16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
%m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
@@ -116,7 +116,7 @@ define <vscale x 16 x half> @vfneg_vv_nxv16f16_unmasked(<vscale x 16 x half> %va
; CHECK-LABEL: vfneg_vv_nxv16f16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
%m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
@@ -140,7 +140,7 @@ define <vscale x 32 x half> @vfneg_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; CHECK-LABEL: vfneg_vv_nxv32f16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 32 x i1> poison, i1 true, i32 0
%m = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> poison, <vscale x 32 x i32> zeroinitializer
@@ -164,7 +164,7 @@ define <vscale x 1 x float> @vfneg_vv_nxv1f32_unmasked(<vscale x 1 x float> %va,
; CHECK-LABEL: vfneg_vv_nxv1f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
%m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
@@ -188,7 +188,7 @@ define <vscale x 2 x float> @vfneg_vv_nxv2f32_unmasked(<vscale x 2 x float> %va,
; CHECK-LABEL: vfneg_vv_nxv2f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
%m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
@@ -212,7 +212,7 @@ define <vscale x 4 x float> @vfneg_vv_nxv4f32_unmasked(<vscale x 4 x float> %va,
; CHECK-LABEL: vfneg_vv_nxv4f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
%m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
@@ -236,7 +236,7 @@ define <vscale x 8 x float> @vfneg_vv_nxv8f32_unmasked(<vscale x 8 x float> %va,
; CHECK-LABEL: vfneg_vv_nxv8f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
%m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
@@ -260,7 +260,7 @@ define <vscale x 16 x float> @vfneg_vv_nxv16f32_unmasked(<vscale x 16 x float> %
; CHECK-LABEL: vfneg_vv_nxv16f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
%m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
@@ -284,7 +284,7 @@ define <vscale x 1 x double> @vfneg_vv_nxv1f64_unmasked(<vscale x 1 x double> %v
; CHECK-LABEL: vfneg_vv_nxv1f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
%m = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
@@ -308,7 +308,7 @@ define <vscale x 2 x double> @vfneg_vv_nxv2f64_unmasked(<vscale x 2 x double> %v
; CHECK-LABEL: vfneg_vv_nxv2f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
%m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
@@ -332,7 +332,7 @@ define <vscale x 4 x double> @vfneg_vv_nxv4f64_unmasked(<vscale x 4 x double> %v
; CHECK-LABEL: vfneg_vv_nxv4f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
%m = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
@@ -356,7 +356,7 @@ define <vscale x 7 x double> @vfneg_vv_nxv7f64_unmasked(<vscale x 7 x double> %v
; CHECK-LABEL: vfneg_vv_nxv7f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 7 x i1> poison, i1 true, i32 0
%m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> poison, <vscale x 7 x i32> zeroinitializer
@@ -380,7 +380,7 @@ define <vscale x 8 x double> @vfneg_vv_nxv8f64_unmasked(<vscale x 8 x double> %v
; CHECK-LABEL: vfneg_vv_nxv8f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: ret
%head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
%m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
@@ -431,13 +431,13 @@ define <vscale x 16 x double> @vfneg_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: li a3, 0
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: sub a1, a0, a1
; CHECK-NEXT: vfsgnjn.vv v8, v8, v8
; CHECK-NEXT: vfneg.v v8, v8
; CHECK-NEXT: bltu a0, a1, .LBB33_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a3, a1
; CHECK-NEXT: .LBB33_4:
; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu
; CHECK-NEXT: vfsgnjn.vv v16, v16, v16
; CHECK-NEXT: vfneg.v v16, v16
; CHECK-NEXT: ret
%head = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
%m = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
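
The `_unmasked` tests build an all-true mask by splatting `i1 true` through an insertelement/shufflevector pair; the backend recognizes the all-ones splat and selects the unmasked form (no `v0.t` suffix), with the explicit vector length `%evl` carried into `vsetvli zero, a0, ...`. A hypothetical reduced pattern (value names illustrative):

    %head = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
    %m = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
    %r = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 %evl)
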
68 changes: 34 additions & 34 deletions llvm/test/CodeGen/RISCV/rvv/vfptoi-sdnode.ll
@@ -636,7 +636,7 @@ define <vscale x 1 x i8> @vfptosi_nxv1f32_nxv1i8(<vscale x 1 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v9, zero
; CHECK-NEXT: vncvt.x.x.w v8, v9
; CHECK-NEXT: ret
%evec = fptosi <vscale x 1 x float> %va to <vscale x 1 x i8>
ret <vscale x 1 x i8> %evec
@@ -648,7 +648,7 @@ define <vscale x 1 x i8> @vfptoui_nxv1f32_nxv1i8(<vscale x 1 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v9, zero
; CHECK-NEXT: vncvt.x.x.w v8, v9
; CHECK-NEXT: ret
%evec = fptoui <vscale x 1 x float> %va to <vscale x 1 x i8>
ret <vscale x 1 x i8> %evec
@@ -748,7 +748,7 @@ define <vscale x 2 x i8> @vfptosi_nxv2f32_nxv2i8(<vscale x 2 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v9, zero
; CHECK-NEXT: vncvt.x.x.w v8, v9
; CHECK-NEXT: ret
%evec = fptosi <vscale x 2 x float> %va to <vscale x 2 x i8>
ret <vscale x 2 x i8> %evec
@@ -760,7 +760,7 @@ define <vscale x 2 x i8> @vfptoui_nxv2f32_nxv2i8(<vscale x 2 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v9, zero
; CHECK-NEXT: vncvt.x.x.w v8, v9
; CHECK-NEXT: ret
%evec = fptoui <vscale x 2 x float> %va to <vscale x 2 x i8>
ret <vscale x 2 x i8> %evec
@@ -860,7 +860,7 @@ define <vscale x 4 x i8> @vfptosi_nxv4f32_nxv4i8(<vscale x 4 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v10, zero
; CHECK-NEXT: vncvt.x.x.w v8, v10
; CHECK-NEXT: ret
%evec = fptosi <vscale x 4 x float> %va to <vscale x 4 x i8>
ret <vscale x 4 x i8> %evec
@@ -872,7 +872,7 @@ define <vscale x 4 x i8> @vfptoui_nxv4f32_nxv4i8(<vscale x 4 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v10, zero
; CHECK-NEXT: vncvt.x.x.w v8, v10
; CHECK-NEXT: ret
%evec = fptoui <vscale x 4 x float> %va to <vscale x 4 x i8>
ret <vscale x 4 x i8> %evec
@@ -972,7 +972,7 @@ define <vscale x 8 x i8> @vfptosi_nxv8f32_nxv8i8(<vscale x 8 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v12, zero
; CHECK-NEXT: vncvt.x.x.w v8, v12
; CHECK-NEXT: ret
%evec = fptosi <vscale x 8 x float> %va to <vscale x 8 x i8>
ret <vscale x 8 x i8> %evec
@@ -984,7 +984,7 @@ define <vscale x 8 x i8> @vfptoui_nxv8f32_nxv8i8(<vscale x 8 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v12, zero
; CHECK-NEXT: vncvt.x.x.w v8, v12
; CHECK-NEXT: ret
%evec = fptoui <vscale x 8 x float> %va to <vscale x 8 x i8>
ret <vscale x 8 x i8> %evec
@@ -1084,7 +1084,7 @@ define <vscale x 16 x i8> @vfptosi_nxv16f32_nxv16i8(<vscale x 16 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v16, zero
; CHECK-NEXT: vncvt.x.x.w v8, v16
; CHECK-NEXT: ret
%evec = fptosi <vscale x 16 x float> %va to <vscale x 16 x i8>
ret <vscale x 16 x i8> %evec
@@ -1096,7 +1096,7 @@ define <vscale x 16 x i8> @vfptoui_nxv16f32_nxv16i8(<vscale x 16 x float> %va) {
; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v16, zero
; CHECK-NEXT: vncvt.x.x.w v8, v16
; CHECK-NEXT: ret
%evec = fptoui <vscale x 16 x float> %va to <vscale x 16 x i8>
ret <vscale x 16 x i8> %evec
@@ -1174,9 +1174,9 @@ define <vscale x 1 x i8> @vfptosi_nxv1f64_nxv1i8(<vscale x 1 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v9, zero
; CHECK-NEXT: vncvt.x.x.w v8, v9
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: ret
%evec = fptosi <vscale x 1 x double> %va to <vscale x 1 x i8>
ret <vscale x 1 x i8> %evec
@@ -1188,9 +1188,9 @@ define <vscale x 1 x i8> @vfptoui_nxv1f64_nxv1i8(<vscale x 1 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v9, zero
; CHECK-NEXT: vncvt.x.x.w v8, v9
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: ret
%evec = fptoui <vscale x 1 x double> %va to <vscale x 1 x i8>
ret <vscale x 1 x i8> %evec
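
The f64-to-i8 cases combine both kinds of narrowing: one `vfncvt.rtz.x.f.w` (or `.xu.` for the unsigned variant) converts f64 to i32 with round-toward-zero semantics, matching `fptosi`/`fptoui`, and two `vncvt.x.x.w` steps finish the i32-to-i8 truncation, since each narrowing op halves the width once. Annotated sketch of the signed variant (registers mirror the checks):

    vsetvli a0, zero, e32, mf2, ta, mu
    vfncvt.rtz.x.f.w v9, v8              ; f64 -> i32, round toward zero
    vsetvli zero, zero, e16, mf4, ta, mu
    vncvt.x.x.w v8, v9                   ; i32 -> i16
    vsetvli zero, zero, e8, mf8, ta, mu
    vncvt.x.x.w v8, v8                   ; i16 -> i8
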
@@ -1202,7 +1202,7 @@ define <vscale x 1 x i16> @vfptosi_nxv1f64_nxv1i16(<vscale x 1 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v9, zero
; CHECK-NEXT: vncvt.x.x.w v8, v9
; CHECK-NEXT: ret
%evec = fptosi <vscale x 1 x double> %va to <vscale x 1 x i16>
ret <vscale x 1 x i16> %evec
@@ -1214,7 +1214,7 @@ define <vscale x 1 x i16> @vfptoui_nxv1f64_nxv1i16(<vscale x 1 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v9, zero
; CHECK-NEXT: vncvt.x.x.w v8, v9
; CHECK-NEXT: ret
%evec = fptoui <vscale x 1 x double> %va to <vscale x 1 x i16>
ret <vscale x 1 x i16> %evec
@@ -1292,9 +1292,9 @@ define <vscale x 2 x i8> @vfptosi_nxv2f64_nxv2i8(<vscale x 2 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v10, zero
; CHECK-NEXT: vncvt.x.x.w v8, v10
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: ret
%evec = fptosi <vscale x 2 x double> %va to <vscale x 2 x i8>
ret <vscale x 2 x i8> %evec
@@ -1306,9 +1306,9 @@ define <vscale x 2 x i8> @vfptoui_nxv2f64_nxv2i8(<vscale x 2 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v10, zero
; CHECK-NEXT: vncvt.x.x.w v8, v10
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: ret
%evec = fptoui <vscale x 2 x double> %va to <vscale x 2 x i8>
ret <vscale x 2 x i8> %evec
@@ -1320,7 +1320,7 @@ define <vscale x 2 x i16> @vfptosi_nxv2f64_nxv2i16(<vscale x 2 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v10, zero
; CHECK-NEXT: vncvt.x.x.w v8, v10
; CHECK-NEXT: ret
%evec = fptosi <vscale x 2 x double> %va to <vscale x 2 x i16>
ret <vscale x 2 x i16> %evec
@@ -1332,7 +1332,7 @@ define <vscale x 2 x i16> @vfptoui_nxv2f64_nxv2i16(<vscale x 2 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v10, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v10, zero
; CHECK-NEXT: vncvt.x.x.w v8, v10
; CHECK-NEXT: ret
%evec = fptoui <vscale x 2 x double> %va to <vscale x 2 x i16>
ret <vscale x 2 x i16> %evec
@@ -1410,9 +1410,9 @@ define <vscale x 4 x i8> @vfptosi_nxv4f64_nxv4i8(<vscale x 4 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v12, zero
; CHECK-NEXT: vncvt.x.x.w v8, v12
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: ret
%evec = fptosi <vscale x 4 x double> %va to <vscale x 4 x i8>
ret <vscale x 4 x i8> %evec
@@ -1424,9 +1424,9 @@ define <vscale x 4 x i8> @vfptoui_nxv4f64_nxv4i8(<vscale x 4 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v12, zero
; CHECK-NEXT: vncvt.x.x.w v8, v12
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v8, zero
; CHECK-NEXT: vncvt.x.x.w v8, v8
; CHECK-NEXT: ret
%evec = fptoui <vscale x 4 x double> %va to <vscale x 4 x i8>
ret <vscale x 4 x i8> %evec
@@ -1438,7 +1438,7 @@ define <vscale x 4 x i16> @vfptosi_nxv4f64_nxv4i16(<vscale x 4 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v12, zero
; CHECK-NEXT: vncvt.x.x.w v8, v12
; CHECK-NEXT: ret
%evec = fptosi <vscale x 4 x double> %va to <vscale x 4 x i16>
ret <vscale x 4 x i16> %evec
@@ -1450,7 +1450,7 @@ define <vscale x 4 x i16> @vfptoui_nxv4f64_nxv4i16(<vscale x 4 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v12, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v12, zero
; CHECK-NEXT: vncvt.x.x.w v8, v12
; CHECK-NEXT: ret
%evec = fptoui <vscale x 4 x double> %va to <vscale x 4 x i16>
ret <vscale x 4 x i16> %evec
@@ -1528,9 +1528,9 @@ define <vscale x 8 x i8> @vfptosi_nxv8f64_nxv8i8(<vscale x 8 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vnsrl.wx v10, v16, zero
; CHECK-NEXT: vncvt.x.x.w v10, v16
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v10, zero
; CHECK-NEXT: vncvt.x.x.w v8, v10
; CHECK-NEXT: ret
%evec = fptosi <vscale x 8 x double> %va to <vscale x 8 x i8>
ret <vscale x 8 x i8> %evec
@@ -1542,9 +1542,9 @@ define <vscale x 8 x i8> @vfptoui_nxv8f64_nxv8i8(<vscale x 8 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vnsrl.wx v10, v16, zero
; CHECK-NEXT: vncvt.x.x.w v10, v16
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v10, zero
; CHECK-NEXT: vncvt.x.x.w v8, v10
; CHECK-NEXT: ret
%evec = fptoui <vscale x 8 x double> %va to <vscale x 8 x i8>
ret <vscale x 8 x i8> %evec
@@ -1556,7 +1556,7 @@ define <vscale x 8 x i16> @vfptosi_nxv8f64_nxv8i16(<vscale x 8 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v16, zero
; CHECK-NEXT: vncvt.x.x.w v8, v16
; CHECK-NEXT: ret
%evec = fptosi <vscale x 8 x double> %va to <vscale x 8 x i16>
ret <vscale x 8 x i16> %evec
@@ -1568,7 +1568,7 @@ define <vscale x 8 x i16> @vfptoui_nxv8f64_nxv8i16(<vscale x 8 x double> %va) {
; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vnsrl.wx v8, v16, zero
; CHECK-NEXT: vncvt.x.x.w v8, v16
; CHECK-NEXT: ret
%evec = fptoui <vscale x 8 x double> %va to <vscale x 8 x i16>
ret <vscale x 8 x i16> %evec