6 changes: 2 additions & 4 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
@@ -36,9 +36,8 @@ define <4 x float> @hang_when_merging_stores_after_legalization(<8 x float> %x,
; LMULMAX1-LABEL: hang_when_merging_stores_after_legalization:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: li a0, 2
-; LMULMAX1-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; LMULMAX1-NEXT: vmv.s.x v0, a0
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; LMULMAX1-NEXT: vmv.s.x v0, a0
; LMULMAX1-NEXT: vrgather.vi v12, v8, 0
; LMULMAX1-NEXT: vrgather.vi v12, v9, 3, v0.t
; LMULMAX1-NEXT: vsetivli zero, 3, e32, m1, tu, ma
@@ -152,9 +151,8 @@ define void @buildvec_merge0_v4f32(<4 x float>* %x, float %f) {
; CHECK-LABEL: buildvec_merge0_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 6
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a1
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmv.s.x v0, a1
; CHECK-NEXT: vfmv.v.f v8, fa0
; CHECK-NEXT: lui a1, 262144
; CHECK-NEXT: vmerge.vxm v8, v8, a1, v0
26 changes: 9 additions & 17 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
@@ -6,9 +6,8 @@ define <4 x half> @shuffle_v4f16(<4 x half> %x, <4 x half> %y) {
; CHECK-LABEL: shuffle_v4f16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 11
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: ret
%s = shufflevector <4 x half> %x, <4 x half> %y, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
@@ -19,9 +18,8 @@ define <8 x float> @shuffle_v8f32(<8 x float> %x, <8 x float> %y) {
; CHECK-LABEL: shuffle_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 236
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT: ret
%s = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 8, i32 9, i32 2, i32 3, i32 12, i32 5, i32 6, i32 7>
@@ -34,9 +32,8 @@ define <4 x double> @shuffle_fv_v4f64(<4 x double> %x) {
; RV32-NEXT: li a0, 9
; RV32-NEXT: lui a1, %hi(.LCPI2_0)
; RV32-NEXT: fld fa5, %lo(.LCPI2_0)(a1)
-; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; RV32-NEXT: vmv.s.x v0, a0
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vmv.s.x v0, a0
; RV32-NEXT: vfmerge.vfm v8, v8, fa5, v0
; RV32-NEXT: ret
;
@@ -45,9 +42,8 @@ define <4 x double> @shuffle_fv_v4f64(<4 x double> %x) {
; RV64-NEXT: lui a0, %hi(.LCPI2_0)
; RV64-NEXT: fld fa5, %lo(.LCPI2_0)(a0)
; RV64-NEXT: li a0, 9
-; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; RV64-NEXT: vmv.s.x v0, a0
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vmv.s.x v0, a0
; RV64-NEXT: vfmerge.vfm v8, v8, fa5, v0
; RV64-NEXT: ret
%s = shufflevector <4 x double> <double 2.0, double 2.0, double 2.0, double 2.0>, <4 x double> %x, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
@@ -60,9 +56,8 @@ define <4 x double> @shuffle_vf_v4f64(<4 x double> %x) {
; RV32-NEXT: li a0, 6
; RV32-NEXT: lui a1, %hi(.LCPI3_0)
; RV32-NEXT: fld fa5, %lo(.LCPI3_0)(a1)
-; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; RV32-NEXT: vmv.s.x v0, a0
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vmv.s.x v0, a0
; RV32-NEXT: vfmerge.vfm v8, v8, fa5, v0
; RV32-NEXT: ret
;
@@ -71,9 +66,8 @@ define <4 x double> @shuffle_vf_v4f64(<4 x double> %x) {
; RV64-NEXT: lui a0, %hi(.LCPI3_0)
; RV64-NEXT: fld fa5, %lo(.LCPI3_0)(a0)
; RV64-NEXT: li a0, 6
-; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; RV64-NEXT: vmv.s.x v0, a0
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vmv.s.x v0, a0
; RV64-NEXT: vfmerge.vfm v8, v8, fa5, v0
; RV64-NEXT: ret
%s = shufflevector <4 x double> %x, <4 x double> <double 2.0, double 2.0, double 2.0, double 2.0>, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
@@ -162,9 +156,8 @@ define <4 x double> @vrgather_shuffle_xv_v4f64(<4 x double> %x) {
; RV32-LABEL: vrgather_shuffle_xv_v4f64:
; RV32: # %bb.0:
; RV32-NEXT: li a0, 12
-; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; RV32-NEXT: vmv.s.x v0, a0
; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT: vmv.s.x v0, a0
; RV32-NEXT: lui a0, %hi(.LCPI7_0)
; RV32-NEXT: addi a0, a0, %lo(.LCPI7_0)
; RV32-NEXT: vlse64.v v10, (a0), zero
@@ -177,13 +170,12 @@ define <4 x double> @vrgather_shuffle_xv_v4f64(<4 x double> %x) {
;
; RV64-LABEL: vrgather_shuffle_xv_v4f64:
; RV64: # %bb.0:
-; RV64-NEXT: li a0, 12
-; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; RV64-NEXT: vmv.s.x v0, a0
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: lui a0, %hi(.LCPI7_0)
; RV64-NEXT: addi a0, a0, %lo(.LCPI7_0)
; RV64-NEXT: vlse64.v v10, (a0), zero
+; RV64-NEXT: li a0, 12
+; RV64-NEXT: vmv.s.x v0, a0
; RV64-NEXT: vid.v v12
; RV64-NEXT: vrsub.vi v12, v12, 4
; RV64-NEXT: vrgather.vv v10, v8, v12, v0.t
29 changes: 11 additions & 18 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
@@ -6,9 +6,8 @@ define <4 x i16> @shuffle_v4i16(<4 x i16> %x, <4 x i16> %y) {
; CHECK-LABEL: shuffle_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 11
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: ret
%s = shufflevector <4 x i16> %x, <4 x i16> %y, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
@@ -19,9 +18,8 @@ define <8 x i32> @shuffle_v8i32(<8 x i32> %x, <8 x i32> %y) {
; CHECK-LABEL: shuffle_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 203
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT: ret
%s = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 0, i32 1, i32 10, i32 3, i32 12, i32 13, i32 6, i32 7>
@@ -32,9 +30,8 @@ define <4 x i16> @shuffle_xv_v4i16(<4 x i16> %x) {
; CHECK-LABEL: shuffle_xv_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 9
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vmerge.vim v8, v8, 5, v0
; CHECK-NEXT: ret
%s = shufflevector <4 x i16> <i16 5, i16 5, i16 5, i16 5>, <4 x i16> %x, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
@@ -45,9 +42,8 @@ define <4 x i16> @shuffle_vx_v4i16(<4 x i16> %x) {
; CHECK-LABEL: shuffle_vx_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 6
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vmerge.vim v8, v8, 5, v0
; CHECK-NEXT: ret
%s = shufflevector <4 x i16> %x, <4 x i16> <i16 5, i16 5, i16 5, i16 5>, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
@@ -103,9 +99,8 @@ define <4 x i16> @vrgather_shuffle_xv_v4i16(<4 x i16> %x) {
; CHECK-LABEL: vrgather_shuffle_xv_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 12
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vid.v v9
; CHECK-NEXT: vrsub.vi v10, v9, 4
; CHECK-NEXT: vmv.v.i v9, 5
@@ -238,13 +233,12 @@ define <8 x i64> @vrgather_shuffle_xv_v8i64(<8 x i64> %x) {
;
; RV64-LABEL: vrgather_shuffle_xv_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: li a0, 113
-; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; RV64-NEXT: vmv.s.x v0, a0
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: lui a0, %hi(.LCPI12_0)
; RV64-NEXT: addi a0, a0, %lo(.LCPI12_0)
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vle64.v v16, (a0)
+; RV64-NEXT: li a0, 113
+; RV64-NEXT: vmv.s.x v0, a0
; RV64-NEXT: vmv.v.i v12, -1
; RV64-NEXT: vrgather.vv v12, v8, v16, v0.t
; RV64-NEXT: vmv.v.v v8, v12
@@ -273,13 +267,12 @@ define <8 x i64> @vrgather_shuffle_vx_v8i64(<8 x i64> %x) {
;
; RV64-LABEL: vrgather_shuffle_vx_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: li a0, 115
-; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; RV64-NEXT: vmv.s.x v0, a0
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: lui a0, %hi(.LCPI13_0)
; RV64-NEXT: addi a0, a0, %lo(.LCPI13_0)
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vle64.v v16, (a0)
+; RV64-NEXT: li a0, 115
+; RV64-NEXT: vmv.s.x v0, a0
; RV64-NEXT: vmv.v.i v12, 5
; RV64-NEXT: vrgather.vv v12, v8, v16, v0.t
; RV64-NEXT: vmv.v.v v8, v12
3 changes: 1 addition & 2 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll
@@ -230,9 +230,8 @@ define void @splat_v4i64(ptr %x, i64 %y) {
; LMULMAX1-RV32-LABEL: splat_v4i64:
; LMULMAX1-RV32: # %bb.0:
; LMULMAX1-RV32-NEXT: li a3, 5
-; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; LMULMAX1-RV32-NEXT: vmv.s.x v0, a3
; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; LMULMAX1-RV32-NEXT: vmv.s.x v0, a3
; LMULMAX1-RV32-NEXT: vmv.v.x v8, a2
; LMULMAX1-RV32-NEXT: vmerge.vxm v8, v8, a1, v0
; LMULMAX1-RV32-NEXT: addi a1, a0, 16
3 changes: 1 addition & 2 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
@@ -745,12 +745,11 @@ define <128 x i1> @buildvec_mask_v128i1() {
; RV32-LMULMAX8: # %bb.0:
; RV32-LMULMAX8-NEXT: lui a0, 748388
; RV32-LMULMAX8-NEXT: addi a0, a0, -1793
-; RV32-LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-LMULMAX8-NEXT: vsetivli zero, 2, e32, m1, tu, ma
; RV32-LMULMAX8-NEXT: vmv.s.x v8, a0
; RV32-LMULMAX8-NEXT: lui a0, 748384
; RV32-LMULMAX8-NEXT: addi a0, a0, 1776
; RV32-LMULMAX8-NEXT: vmv.s.x v0, a0
-; RV32-LMULMAX8-NEXT: vsetivli zero, 2, e32, m1, tu, ma
; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 1
; RV32-LMULMAX8-NEXT: lui a0, 551776
; RV32-LMULMAX8-NEXT: addi a0, a0, 1776
320 changes: 108 additions & 212 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll

Large diffs are not rendered by default.

144 changes: 48 additions & 96 deletions llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll

Large diffs are not rendered by default.

45 changes: 15 additions & 30 deletions llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll
@@ -17,9 +17,8 @@ define <vscale x 1 x half> @insertelt_nxv1f16_0(<vscale x 1 x half> %v, half %el
define <vscale x 1 x half> @insertelt_nxv1f16_imm(<vscale x 1 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv1f16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, ma
+; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x half> %v, half %elt, i32 3
@@ -52,9 +51,8 @@ define <vscale x 2 x half> @insertelt_nxv2f16_0(<vscale x 2 x half> %v, half %el
define <vscale x 2 x half> @insertelt_nxv2f16_imm(<vscale x 2 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv2f16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, ma
+; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x half> %v, half %elt, i32 3
@@ -87,9 +85,8 @@ define <vscale x 4 x half> @insertelt_nxv4f16_0(<vscale x 4 x half> %v, half %el
define <vscale x 4 x half> @insertelt_nxv4f16_imm(<vscale x 4 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv4f16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, ma
+; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x half> %v, half %elt, i32 3
@@ -122,9 +119,8 @@ define <vscale x 8 x half> @insertelt_nxv8f16_0(<vscale x 8 x half> %v, half %el
define <vscale x 8 x half> @insertelt_nxv8f16_imm(<vscale x 8 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv8f16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, ma
+; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x half> %v, half %elt, i32 3
@@ -157,9 +153,8 @@ define <vscale x 16 x half> @insertelt_nxv16f16_0(<vscale x 16 x half> %v, half
define <vscale x 16 x half> @insertelt_nxv16f16_imm(<vscale x 16 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv16f16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, ma
+; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x half> %v, half %elt, i32 3
@@ -192,9 +187,8 @@ define <vscale x 32 x half> @insertelt_nxv32f16_0(<vscale x 32 x half> %v, half
define <vscale x 32 x half> @insertelt_nxv32f16_imm(<vscale x 32 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv32f16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfmv.s.f v16, fa0
; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, ma
+; CHECK-NEXT: vfmv.s.f v16, fa0
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x half> %v, half %elt, i32 3
@@ -227,9 +221,8 @@ define <vscale x 1 x float> @insertelt_nxv1f32_0(<vscale x 1 x float> %v, float
define <vscale x 1 x float> @insertelt_nxv1f32_imm(<vscale x 1 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv1f32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, ma
+; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x float> %v, float %elt, i32 3
@@ -262,9 +255,8 @@ define <vscale x 2 x float> @insertelt_nxv2f32_0(<vscale x 2 x float> %v, float
define <vscale x 2 x float> @insertelt_nxv2f32_imm(<vscale x 2 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv2f32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, ma
+; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x float> %v, float %elt, i32 3
@@ -297,9 +289,8 @@ define <vscale x 4 x float> @insertelt_nxv4f32_0(<vscale x 4 x float> %v, float
define <vscale x 4 x float> @insertelt_nxv4f32_imm(<vscale x 4 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv4f32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x float> %v, float %elt, i32 3
@@ -332,9 +323,8 @@ define <vscale x 8 x float> @insertelt_nxv8f32_0(<vscale x 8 x float> %v, float
define <vscale x 8 x float> @insertelt_nxv8f32_imm(<vscale x 8 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv8f32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, ma
+; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x float> %v, float %elt, i32 3
@@ -367,9 +357,8 @@ define <vscale x 16 x float> @insertelt_nxv16f32_0(<vscale x 16 x float> %v, flo
define <vscale x 16 x float> @insertelt_nxv16f32_imm(<vscale x 16 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv16f32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfmv.s.f v16, fa0
; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, ma
+; CHECK-NEXT: vfmv.s.f v16, fa0
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x float> %v, float %elt, i32 3
@@ -402,9 +391,8 @@ define <vscale x 1 x double> @insertelt_nxv1f64_0(<vscale x 1 x double> %v, doub
define <vscale x 1 x double> @insertelt_nxv1f64_imm(<vscale x 1 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv1f64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, ma
+; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x double> %v, double %elt, i32 3
@@ -437,9 +425,8 @@ define <vscale x 2 x double> @insertelt_nxv2f64_0(<vscale x 2 x double> %v, doub
define <vscale x 2 x double> @insertelt_nxv2f64_imm(<vscale x 2 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv2f64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, ma
+; CHECK-NEXT: vfmv.s.f v10, fa0
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x double> %v, double %elt, i32 3
@@ -472,9 +459,8 @@ define <vscale x 4 x double> @insertelt_nxv4f64_0(<vscale x 4 x double> %v, doub
define <vscale x 4 x double> @insertelt_nxv4f64_imm(<vscale x 4 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv4f64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, ma
+; CHECK-NEXT: vfmv.s.f v12, fa0
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x double> %v, double %elt, i32 3
@@ -507,9 +493,8 @@ define <vscale x 8 x double> @insertelt_nxv8f64_0(<vscale x 8 x double> %v, doub
define <vscale x 8 x double> @insertelt_nxv8f64_imm(<vscale x 8 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv8f64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
-; CHECK-NEXT: vfmv.s.f v16, fa0
; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, ma
+; CHECK-NEXT: vfmv.s.f v16, fa0
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x double> %v, double %elt, i32 3
60 changes: 20 additions & 40 deletions llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll
@@ -15,9 +15,8 @@ define <vscale x 1 x i8> @insertelt_nxv1i8_0(<vscale x 1 x i8> %v, i8 signext %e
define <vscale x 1 x i8> @insertelt_nxv1i8_imm(<vscale x 1 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 4, e8, mf8, tu, ma
+; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 3
@@ -50,9 +49,8 @@ define <vscale x 2 x i8> @insertelt_nxv2i8_0(<vscale x 2 x i8> %v, i8 signext %e
define <vscale x 2 x i8> @insertelt_nxv2i8_imm(<vscale x 2 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, ma
+; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 3
@@ -85,9 +83,8 @@ define <vscale x 4 x i8> @insertelt_nxv4i8_0(<vscale x 4 x i8> %v, i8 signext %e
define <vscale x 4 x i8> @insertelt_nxv4i8_imm(<vscale x 4 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
+; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 3
@@ -120,9 +117,8 @@ define <vscale x 8 x i8> @insertelt_nxv8i8_0(<vscale x 8 x i8> %v, i8 signext %e
define <vscale x 8 x i8> @insertelt_nxv8i8_imm(<vscale x 8 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, ma
+; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 3
@@ -155,9 +151,8 @@ define <vscale x 16 x i8> @insertelt_nxv16i8_0(<vscale x 16 x i8> %v, i8 signext
define <vscale x 16 x i8> @insertelt_nxv16i8_imm(<vscale x 16 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vsetivli zero, 4, e8, m2, tu, ma
+; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 3
@@ -190,9 +185,8 @@ define <vscale x 32 x i8> @insertelt_nxv32i8_0(<vscale x 32 x i8> %v, i8 signext
define <vscale x 32 x i8> @insertelt_nxv32i8_imm(<vscale x 32 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: vsetivli zero, 4, e8, m4, tu, ma
+; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 3
@@ -225,9 +219,8 @@ define <vscale x 64 x i8> @insertelt_nxv64i8_0(<vscale x 64 x i8> %v, i8 signext
define <vscale x 64 x i8> @insertelt_nxv64i8_imm(<vscale x 64 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv64i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: vsetivli zero, 4, e8, m8, tu, ma
+; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 3
@@ -260,9 +253,8 @@ define <vscale x 1 x i16> @insertelt_nxv1i16_0(<vscale x 1 x i16> %v, i16 signex
define <vscale x 1 x i16> @insertelt_nxv1i16_imm(<vscale x 1 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, ma
+; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 3
@@ -295,9 +287,8 @@ define <vscale x 2 x i16> @insertelt_nxv2i16_0(<vscale x 2 x i16> %v, i16 signex
define <vscale x 2 x i16> @insertelt_nxv2i16_imm(<vscale x 2 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, ma
+; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 3
@@ -330,9 +321,8 @@ define <vscale x 4 x i16> @insertelt_nxv4i16_0(<vscale x 4 x i16> %v, i16 signex
define <vscale x 4 x i16> @insertelt_nxv4i16_imm(<vscale x 4 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, ma
+; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 3
@@ -365,9 +355,8 @@ define <vscale x 8 x i16> @insertelt_nxv8i16_0(<vscale x 8 x i16> %v, i16 signex
define <vscale x 8 x i16> @insertelt_nxv8i16_imm(<vscale x 8 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, ma
+; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 3
@@ -400,9 +389,8 @@ define <vscale x 16 x i16> @insertelt_nxv16i16_0(<vscale x 16 x i16> %v, i16 sig
define <vscale x 16 x i16> @insertelt_nxv16i16_imm(<vscale x 16 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, ma
+; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 3
@@ -435,9 +423,8 @@ define <vscale x 32 x i16> @insertelt_nxv32i16_0(<vscale x 32 x i16> %v, i16 sig
define <vscale x 32 x i16> @insertelt_nxv32i16_imm(<vscale x 32 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, ma
+; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 3
@@ -470,9 +457,8 @@ define <vscale x 1 x i32> @insertelt_nxv1i32_0(<vscale x 1 x i32> %v, i32 %elt)
define <vscale x 1 x i32> @insertelt_nxv1i32_imm(<vscale x 1 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv1i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, ma
+; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 3
@@ -505,9 +491,8 @@ define <vscale x 2 x i32> @insertelt_nxv2i32_0(<vscale x 2 x i32> %v, i32 %elt)
define <vscale x 2 x i32> @insertelt_nxv2i32_imm(<vscale x 2 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv2i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, ma
+; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 3
@@ -540,9 +525,8 @@ define <vscale x 4 x i32> @insertelt_nxv4i32_0(<vscale x 4 x i32> %v, i32 %elt)
define <vscale x 4 x i32> @insertelt_nxv4i32_imm(<vscale x 4 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv4i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 3
@@ -575,9 +559,8 @@ define <vscale x 8 x i32> @insertelt_nxv8i32_0(<vscale x 8 x i32> %v, i32 %elt)
define <vscale x 8 x i32> @insertelt_nxv8i32_imm(<vscale x 8 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv8i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, ma
+; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 3
@@ -610,9 +593,8 @@ define <vscale x 16 x i32> @insertelt_nxv16i32_0(<vscale x 16 x i32> %v, i32 %el
define <vscale x 16 x i32> @insertelt_nxv16i32_imm(<vscale x 16 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv16i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, ma
+; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 3
@@ -800,9 +782,8 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_imm_c10(<vscale x 2 x i64> %v) {
; CHECK-LABEL: insertelt_nxv2i64_imm_c10:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 10
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, ma
+; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i64> %v, i64 10, i32 3
@@ -838,9 +819,8 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_imm_cn1(<vscale x 2 x i64> %v) {
; CHECK-LABEL: insertelt_nxv2i64_imm_cn1:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, -1
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, ma
+; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i64> %v, i64 -1, i32 3
66 changes: 22 additions & 44 deletions llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll
@@ -15,9 +15,8 @@ define <vscale x 1 x i8> @insertelt_nxv1i8_0(<vscale x 1 x i8> %v, i8 signext %e
define <vscale x 1 x i8> @insertelt_nxv1i8_imm(<vscale x 1 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 4, e8, mf8, tu, ma
+; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 3
@@ -50,9 +49,8 @@ define <vscale x 2 x i8> @insertelt_nxv2i8_0(<vscale x 2 x i8> %v, i8 signext %e
define <vscale x 2 x i8> @insertelt_nxv2i8_imm(<vscale x 2 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, tu, ma
+; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 3
@@ -85,9 +83,8 @@ define <vscale x 4 x i8> @insertelt_nxv4i8_0(<vscale x 4 x i8> %v, i8 signext %e
define <vscale x 4 x i8> @insertelt_nxv4i8_imm(<vscale x 4 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
+; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 3
@@ -120,9 +117,8 @@ define <vscale x 8 x i8> @insertelt_nxv8i8_0(<vscale x 8 x i8> %v, i8 signext %e
define <vscale x 8 x i8> @insertelt_nxv8i8_imm(<vscale x 8 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 4, e8, m1, tu, ma
+; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 3
@@ -155,9 +151,8 @@ define <vscale x 16 x i8> @insertelt_nxv16i8_0(<vscale x 16 x i8> %v, i8 signext
define <vscale x 16 x i8> @insertelt_nxv16i8_imm(<vscale x 16 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vsetivli zero, 4, e8, m2, tu, ma
+; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 3
@@ -190,9 +185,8 @@ define <vscale x 32 x i8> @insertelt_nxv32i8_0(<vscale x 32 x i8> %v, i8 signext
define <vscale x 32 x i8> @insertelt_nxv32i8_imm(<vscale x 32 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: vsetivli zero, 4, e8, m4, tu, ma
+; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 3
@@ -225,9 +219,8 @@ define <vscale x 64 x i8> @insertelt_nxv64i8_0(<vscale x 64 x i8> %v, i8 signext
define <vscale x 64 x i8> @insertelt_nxv64i8_imm(<vscale x 64 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv64i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: vsetivli zero, 4, e8, m8, tu, ma
+; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 3
@@ -260,9 +253,8 @@ define <vscale x 1 x i16> @insertelt_nxv1i16_0(<vscale x 1 x i16> %v, i16 signex
define <vscale x 1 x i16> @insertelt_nxv1i16_imm(<vscale x 1 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 4, e16, mf4, tu, ma
+; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 3
@@ -295,9 +287,8 @@ define <vscale x 2 x i16> @insertelt_nxv2i16_0(<vscale x 2 x i16> %v, i16 signex
define <vscale x 2 x i16> @insertelt_nxv2i16_imm(<vscale x 2 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, ma
+; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 3
@@ -330,9 +321,8 @@ define <vscale x 4 x i16> @insertelt_nxv4i16_0(<vscale x 4 x i16> %v, i16 signex
define <vscale x 4 x i16> @insertelt_nxv4i16_imm(<vscale x 4 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 4, e16, m1, tu, ma
+; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 3
@@ -365,9 +355,8 @@ define <vscale x 8 x i16> @insertelt_nxv8i16_0(<vscale x 8 x i16> %v, i16 signex
define <vscale x 8 x i16> @insertelt_nxv8i16_imm(<vscale x 8 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vsetivli zero, 4, e16, m2, tu, ma
+; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 3
@@ -400,9 +389,8 @@ define <vscale x 16 x i16> @insertelt_nxv16i16_0(<vscale x 16 x i16> %v, i16 sig
define <vscale x 16 x i16> @insertelt_nxv16i16_imm(<vscale x 16 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: vsetivli zero, 4, e16, m4, tu, ma
+; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 3
@@ -435,9 +423,8 @@ define <vscale x 32 x i16> @insertelt_nxv32i16_0(<vscale x 32 x i16> %v, i16 sig
define <vscale x 32 x i16> @insertelt_nxv32i16_imm(<vscale x 32 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: vsetivli zero, 4, e16, m8, tu, ma
+; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 3
@@ -470,9 +457,8 @@ define <vscale x 1 x i32> @insertelt_nxv1i32_0(<vscale x 1 x i32> %v, i32 signex
define <vscale x 1 x i32> @insertelt_nxv1i32_imm(<vscale x 1 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 4, e32, mf2, tu, ma
+; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 3
@@ -505,9 +491,8 @@ define <vscale x 2 x i32> @insertelt_nxv2i32_0(<vscale x 2 x i32> %v, i32 signex
define <vscale x 2 x i32> @insertelt_nxv2i32_imm(<vscale x 2 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 4, e32, m1, tu, ma
+; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 3
@@ -540,9 +525,8 @@ define <vscale x 4 x i32> @insertelt_nxv4i32_0(<vscale x 4 x i32> %v, i32 signex
define <vscale x 4 x i32> @insertelt_nxv4i32_imm(<vscale x 4 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vsetivli zero, 4, e32, m2, tu, ma
+; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 3
@@ -575,9 +559,8 @@ define <vscale x 8 x i32> @insertelt_nxv8i32_0(<vscale x 8 x i32> %v, i32 signex
define <vscale x 8 x i32> @insertelt_nxv8i32_imm(<vscale x 8 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: vsetivli zero, 4, e32, m4, tu, ma
+; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 3
@@ -610,9 +593,8 @@ define <vscale x 16 x i32> @insertelt_nxv16i32_0(<vscale x 16 x i32> %v, i32 sig
define <vscale x 16 x i32> @insertelt_nxv16i32_imm(<vscale x 16 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: vsetivli zero, 4, e32, m8, tu, ma
+; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 3
@@ -645,9 +627,8 @@ define <vscale x 1 x i64> @insertelt_nxv1i64_0(<vscale x 1 x i64> %v, i64 %elt)
define <vscale x 1 x i64> @insertelt_nxv1i64_imm(<vscale x 1 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv1i64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vsetivli zero, 4, e64, m1, tu, ma
+; CHECK-NEXT: vmv.s.x v9, a0
; CHECK-NEXT: vslideup.vi v8, v9, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 3
@@ -682,9 +663,8 @@ define <vscale x 2 x i64> @insertelt_nxv2i64_0(<vscale x 2 x i64> %v, i64 %elt)
define <vscale x 2 x i64> @insertelt_nxv2i64_imm(<vscale x 2 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv2i64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vsetivli zero, 4, e64, m2, tu, ma
+; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 3
@@ -719,9 +699,8 @@ define <vscale x 4 x i64> @insertelt_nxv4i64_0(<vscale x 4 x i64> %v, i64 %elt)
define <vscale x 4 x i64> @insertelt_nxv4i64_imm(<vscale x 4 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv4i64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: vsetivli zero, 4, e64, m4, tu, ma
+; CHECK-NEXT: vmv.s.x v12, a0
; CHECK-NEXT: vslideup.vi v8, v12, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 3
@@ -756,9 +735,8 @@ define <vscale x 8 x i64> @insertelt_nxv8i64_0(<vscale x 8 x i64> %v, i64 %elt)
define <vscale x 8 x i64> @insertelt_nxv8i64_imm(<vscale x 8 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv8i64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
-; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, ma
+; CHECK-NEXT: vmv.s.x v16, a0
; CHECK-NEXT: vslideup.vi v8, v16, 3
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 3