792 changes: 396 additions & 396 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-setcc.ll

Large diffs are not rendered by default.

80 changes: 40 additions & 40 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
@@ -86,20 +86,20 @@ define <4 x double> @vrgather_permute_shuffle_vu_v4f64(<4 x double> %x) {
; RV32-NEXT: lui a0, %hi(.LCPI4_0)
; RV32-NEXT: addi a0, a0, %lo(.LCPI4_0)
; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; RV32-NEXT: vle16.v v25, (a0)
; RV32-NEXT: vle16.v v12, (a0)
; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; RV32-NEXT: vrgatherei16.vv v26, v8, v25
; RV32-NEXT: vmv2r.v v8, v26
; RV32-NEXT: vrgatherei16.vv v10, v8, v12
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: vrgather_permute_shuffle_vu_v4f64:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, %hi(.LCPI4_0)
; RV64-NEXT: addi a0, a0, %lo(.LCPI4_0)
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vle64.v v28, (a0)
; RV64-NEXT: vrgather.vv v26, v8, v28
; RV64-NEXT: vmv2r.v v8, v26
; RV64-NEXT: vle64.v v12, (a0)
; RV64-NEXT: vrgather.vv v10, v8, v12
; RV64-NEXT: vmv2r.v v8, v10
; RV64-NEXT: ret
%s = shufflevector <4 x double> %x, <4 x double> undef, <4 x i32> <i32 1, i32 2, i32 0, i32 1>
ret <4 x double> %s
@@ -111,20 +111,20 @@ define <4 x double> @vrgather_permute_shuffle_uv_v4f64(<4 x double> %x) {
; RV32-NEXT: lui a0, %hi(.LCPI5_0)
; RV32-NEXT: addi a0, a0, %lo(.LCPI5_0)
; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; RV32-NEXT: vle16.v v25, (a0)
; RV32-NEXT: vle16.v v12, (a0)
; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; RV32-NEXT: vrgatherei16.vv v26, v8, v25
; RV32-NEXT: vmv2r.v v8, v26
; RV32-NEXT: vrgatherei16.vv v10, v8, v12
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: vrgather_permute_shuffle_uv_v4f64:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, %hi(.LCPI5_0)
; RV64-NEXT: addi a0, a0, %lo(.LCPI5_0)
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vle64.v v28, (a0)
; RV64-NEXT: vrgather.vv v26, v8, v28
; RV64-NEXT: vmv2r.v v8, v26
; RV64-NEXT: vle64.v v12, (a0)
; RV64-NEXT: vrgather.vv v10, v8, v12
; RV64-NEXT: vmv2r.v v8, v10
; RV64-NEXT: ret
%s = shufflevector <4 x double> undef, <4 x double> %x, <4 x i32> <i32 5, i32 6, i32 4, i32 5>
ret <4 x double> %s
@@ -136,30 +136,30 @@ define <4 x double> @vrgather_shuffle_vv_v4f64(<4 x double> %x, <4 x double> %y)
; RV32-NEXT: lui a0, %hi(.LCPI6_0)
; RV32-NEXT: addi a0, a0, %lo(.LCPI6_0)
; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; RV32-NEXT: vle16.v v25, (a0)
; RV32-NEXT: vle16.v v14, (a0)
; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; RV32-NEXT: vrgatherei16.vv v26, v8, v25
; RV32-NEXT: vrgatherei16.vv v12, v8, v14
; RV32-NEXT: addi a0, zero, 8
; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; RV32-NEXT: vmv.s.x v0, a0
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vrgather.vi v26, v10, 1, v0.t
; RV32-NEXT: vmv2r.v v8, v26
; RV32-NEXT: vrgather.vi v12, v10, 1, v0.t
; RV32-NEXT: vmv2r.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: vrgather_shuffle_vv_v4f64:
; RV64: # %bb.0:
; RV64-NEXT: lui a0, %hi(.LCPI6_0)
; RV64-NEXT: addi a0, a0, %lo(.LCPI6_0)
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vle64.v v28, (a0)
; RV64-NEXT: vrgather.vv v26, v8, v28
; RV64-NEXT: vle64.v v14, (a0)
; RV64-NEXT: vrgather.vv v12, v8, v14
; RV64-NEXT: addi a0, zero, 8
; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; RV64-NEXT: vmv.s.x v0, a0
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vrgather.vi v26, v10, 1, v0.t
; RV64-NEXT: vmv2r.v v8, v26
; RV64-NEXT: vrgather.vi v12, v10, 1, v0.t
; RV64-NEXT: vmv2r.v v8, v12
; RV64-NEXT: ret
%s = shufflevector <4 x double> %x, <4 x double> %y, <4 x i32> <i32 1, i32 2, i32 0, i32 5>
ret <4 x double> %s
@@ -174,12 +174,12 @@ define <4 x double> @vrgather_shuffle_xv_v4f64(<4 x double> %x) {
; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; RV32-NEXT: lui a0, %hi(.LCPI7_0)
; RV32-NEXT: addi a0, a0, %lo(.LCPI7_0)
; RV32-NEXT: vlse64.v v26, (a0), zero
; RV32-NEXT: vid.v v25
; RV32-NEXT: vrsub.vi v25, v25, 4
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vid.v v12
; RV32-NEXT: vrsub.vi v12, v12, 4
; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; RV32-NEXT: vrgatherei16.vv v26, v8, v25, v0.t
; RV32-NEXT: vmv2r.v v8, v26
; RV32-NEXT: vrgatherei16.vv v10, v8, v12, v0.t
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: vrgather_shuffle_xv_v4f64:
@@ -190,11 +190,11 @@ define <4 x double> @vrgather_shuffle_xv_v4f64(<4 x double> %x) {
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: lui a0, %hi(.LCPI7_0)
; RV64-NEXT: addi a0, a0, %lo(.LCPI7_0)
; RV64-NEXT: vlse64.v v26, (a0), zero
; RV64-NEXT: vid.v v28
; RV64-NEXT: vrsub.vi v28, v28, 4
; RV64-NEXT: vrgather.vv v26, v8, v28, v0.t
; RV64-NEXT: vmv2r.v v8, v26
; RV64-NEXT: vlse64.v v10, (a0), zero
; RV64-NEXT: vid.v v12
; RV64-NEXT: vrsub.vi v12, v12, 4
; RV64-NEXT: vrgather.vv v10, v8, v12, v0.t
; RV64-NEXT: vmv2r.v v8, v10
; RV64-NEXT: ret
%s = shufflevector <4 x double> <double 2.0, double 2.0, double 2.0, double 2.0>, <4 x double> %x, <4 x i32> <i32 0, i32 3, i32 6, i32 5>
ret <4 x double> %s
@@ -204,33 +204,33 @@ define <4 x double> @vrgather_shuffle_vx_v4f64(<4 x double> %x) {
; RV32-LABEL: vrgather_shuffle_vx_v4f64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; RV32-NEXT: vid.v v25
; RV32-NEXT: vid.v v10
; RV32-NEXT: addi a0, zero, 3
; RV32-NEXT: vmul.vx v25, v25, a0
; RV32-NEXT: vmul.vx v12, v10, a0
; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; RV32-NEXT: vmv.s.x v0, a0
; RV32-NEXT: lui a0, %hi(.LCPI8_0)
; RV32-NEXT: addi a0, a0, %lo(.LCPI8_0)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v26, (a0), zero
; RV32-NEXT: vrgatherei16.vv v26, v8, v25, v0.t
; RV32-NEXT: vmv2r.v v8, v26
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vrgatherei16.vv v10, v8, v12, v0.t
; RV32-NEXT: vmv2r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: vrgather_shuffle_vx_v4f64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vid.v v26
; RV64-NEXT: vid.v v10
; RV64-NEXT: addi a0, zero, 3
; RV64-NEXT: vmul.vx v28, v26, a0
; RV64-NEXT: vmul.vx v12, v10, a0
; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; RV64-NEXT: vmv.s.x v0, a0
; RV64-NEXT: lui a0, %hi(.LCPI8_0)
; RV64-NEXT: addi a0, a0, %lo(.LCPI8_0)
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vlse64.v v26, (a0), zero
; RV64-NEXT: vrgather.vv v26, v8, v28, v0.t
; RV64-NEXT: vmv2r.v v8, v26
; RV64-NEXT: vlse64.v v10, (a0), zero
; RV64-NEXT: vrgather.vv v10, v8, v12, v0.t
; RV64-NEXT: vmv2r.v v8, v10
; RV64-NEXT: ret
%s = shufflevector <4 x double> %x, <4 x double> <double 2.0, double 2.0, double 2.0, double 2.0>, <4 x i32> <i32 0, i32 3, i32 6, i32 5>
ret <4 x double> %s
84 changes: 42 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-splat.ll
@@ -8,8 +8,8 @@ define void @splat_v8f16(<8 x half>* %x, half %y) {
; CHECK-LABEL: splat_v8f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vfmv.v.f v25, fa0
; CHECK-NEXT: vse16.v v25, (a0)
; CHECK-NEXT: vfmv.v.f v8, fa0
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <8 x half> undef, half %y, i32 0
%b = shufflevector <8 x half> %a, <8 x half> undef, <8 x i32> zeroinitializer
@@ -21,8 +21,8 @@ define void @splat_v4f32(<4 x float>* %x, float %y) {
; CHECK-LABEL: splat_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vfmv.v.f v25, fa0
; CHECK-NEXT: vse32.v v25, (a0)
; CHECK-NEXT: vfmv.v.f v8, fa0
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <4 x float> undef, float %y, i32 0
%b = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> zeroinitializer
@@ -34,8 +34,8 @@ define void @splat_v2f64(<2 x double>* %x, double %y) {
; CHECK-LABEL: splat_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vfmv.v.f v25, fa0
; CHECK-NEXT: vse64.v v25, (a0)
; CHECK-NEXT: vfmv.v.f v8, fa0
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <2 x double> undef, double %y, i32 0
%b = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> zeroinitializer
@@ -47,17 +47,17 @@ define void @splat_16f16(<16 x half>* %x, half %y) {
; LMULMAX2-LABEL: splat_16f16:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; LMULMAX2-NEXT: vfmv.v.f v26, fa0
; LMULMAX2-NEXT: vse16.v v26, (a0)
; LMULMAX2-NEXT: vfmv.v.f v8, fa0
; LMULMAX2-NEXT: vse16.v v8, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_16f16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; LMULMAX1-NEXT: vfmv.v.f v25, fa0
; LMULMAX1-NEXT: vfmv.v.f v8, fa0
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse16.v v25, (a1)
; LMULMAX1-NEXT: vse16.v v25, (a0)
; LMULMAX1-NEXT: vse16.v v8, (a1)
; LMULMAX1-NEXT: vse16.v v8, (a0)
; LMULMAX1-NEXT: ret
%a = insertelement <16 x half> undef, half %y, i32 0
%b = shufflevector <16 x half> %a, <16 x half> undef, <16 x i32> zeroinitializer
@@ -69,17 +69,17 @@ define void @splat_v8f32(<8 x float>* %x, float %y) {
; LMULMAX2-LABEL: splat_v8f32:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-NEXT: vfmv.v.f v26, fa0
; LMULMAX2-NEXT: vse32.v v26, (a0)
; LMULMAX2-NEXT: vfmv.v.f v8, fa0
; LMULMAX2-NEXT: vse32.v v8, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_v8f32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT: vfmv.v.f v25, fa0
; LMULMAX1-NEXT: vfmv.v.f v8, fa0
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse32.v v25, (a1)
; LMULMAX1-NEXT: vse32.v v25, (a0)
; LMULMAX1-NEXT: vse32.v v8, (a1)
; LMULMAX1-NEXT: vse32.v v8, (a0)
; LMULMAX1-NEXT: ret
%a = insertelement <8 x float> undef, float %y, i32 0
%b = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> zeroinitializer
@@ -91,17 +91,17 @@ define void @splat_v4f64(<4 x double>* %x, double %y) {
; LMULMAX2-LABEL: splat_v4f64:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; LMULMAX2-NEXT: vfmv.v.f v26, fa0
; LMULMAX2-NEXT: vse64.v v26, (a0)
; LMULMAX2-NEXT: vfmv.v.f v8, fa0
; LMULMAX2-NEXT: vse64.v v8, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_v4f64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; LMULMAX1-NEXT: vfmv.v.f v25, fa0
; LMULMAX1-NEXT: vfmv.v.f v8, fa0
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse64.v v25, (a1)
; LMULMAX1-NEXT: vse64.v v25, (a0)
; LMULMAX1-NEXT: vse64.v v8, (a1)
; LMULMAX1-NEXT: vse64.v v8, (a0)
; LMULMAX1-NEXT: ret
%a = insertelement <4 x double> undef, double %y, i32 0
%b = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> zeroinitializer
@@ -113,8 +113,8 @@ define void @splat_zero_v8f16(<8 x half>* %x) {
; CHECK-LABEL: splat_zero_v8f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vse16.v v25, (a0)
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <8 x half> undef, half 0.0, i32 0
%b = shufflevector <8 x half> %a, <8 x half> undef, <8 x i32> zeroinitializer
@@ -126,8 +126,8 @@ define void @splat_zero_v4f32(<4 x float>* %x) {
; CHECK-LABEL: splat_zero_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vse32.v v25, (a0)
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <4 x float> undef, float 0.0, i32 0
%b = shufflevector <4 x float> %a, <4 x float> undef, <4 x i32> zeroinitializer
@@ -139,8 +139,8 @@ define void @splat_zero_v2f64(<2 x double>* %x) {
; CHECK-LABEL: splat_zero_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vse64.v v25, (a0)
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <2 x double> undef, double 0.0, i32 0
%b = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> zeroinitializer
@@ -152,17 +152,17 @@ define void @splat_zero_16f16(<16 x half>* %x) {
; LMULMAX2-LABEL: splat_zero_16f16:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; LMULMAX2-NEXT: vmv.v.i v26, 0
; LMULMAX2-NEXT: vse16.v v26, (a0)
; LMULMAX2-NEXT: vmv.v.i v8, 0
; LMULMAX2-NEXT: vse16.v v8, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_zero_16f16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; LMULMAX1-NEXT: vmv.v.i v25, 0
; LMULMAX1-NEXT: vse16.v v25, (a0)
; LMULMAX1-NEXT: vmv.v.i v8, 0
; LMULMAX1-NEXT: vse16.v v8, (a0)
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vse16.v v25, (a0)
; LMULMAX1-NEXT: vse16.v v8, (a0)
; LMULMAX1-NEXT: ret
%a = insertelement <16 x half> undef, half 0.0, i32 0
%b = shufflevector <16 x half> %a, <16 x half> undef, <16 x i32> zeroinitializer
@@ -174,17 +174,17 @@ define void @splat_zero_v8f32(<8 x float>* %x) {
; LMULMAX2-LABEL: splat_zero_v8f32:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-NEXT: vmv.v.i v26, 0
; LMULMAX2-NEXT: vse32.v v26, (a0)
; LMULMAX2-NEXT: vmv.v.i v8, 0
; LMULMAX2-NEXT: vse32.v v8, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_zero_v8f32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT: vmv.v.i v25, 0
; LMULMAX1-NEXT: vse32.v v25, (a0)
; LMULMAX1-NEXT: vmv.v.i v8, 0
; LMULMAX1-NEXT: vse32.v v8, (a0)
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vse32.v v25, (a0)
; LMULMAX1-NEXT: vse32.v v8, (a0)
; LMULMAX1-NEXT: ret
%a = insertelement <8 x float> undef, float 0.0, i32 0
%b = shufflevector <8 x float> %a, <8 x float> undef, <8 x i32> zeroinitializer
@@ -196,17 +196,17 @@ define void @splat_zero_v4f64(<4 x double>* %x) {
; LMULMAX2-LABEL: splat_zero_v4f64:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; LMULMAX2-NEXT: vmv.v.i v26, 0
; LMULMAX2-NEXT: vse64.v v26, (a0)
; LMULMAX2-NEXT: vmv.v.i v8, 0
; LMULMAX2-NEXT: vse64.v v8, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_zero_v4f64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; LMULMAX1-NEXT: vmv.v.i v25, 0
; LMULMAX1-NEXT: vse64.v v25, (a0)
; LMULMAX1-NEXT: vmv.v.i v8, 0
; LMULMAX1-NEXT: vse64.v v8, (a0)
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vse64.v v25, (a0)
; LMULMAX1-NEXT: vse64.v v8, (a0)
; LMULMAX1-NEXT: ret
%a = insertelement <4 x double> undef, double 0.0, i32 0
%b = shufflevector <4 x double> %a, <4 x double> undef, <4 x i32> zeroinitializer
66 changes: 33 additions & 33 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-vrgather.ll
@@ -9,8 +9,8 @@ define void @gather_const_v8f16(<8 x half>* %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, 10
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vlse16.v v25, (a1), zero
; CHECK-NEXT: vse16.v v25, (a0)
; CHECK-NEXT: vlse16.v v8, (a1), zero
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = load <8 x half>, <8 x half>* %x
%b = extractelement <8 x half> %a, i32 5
Expand All @@ -25,8 +25,8 @@ define void @gather_const_v4f32(<4 x float>* %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, 8
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vlse32.v v25, (a1), zero
; CHECK-NEXT: vse32.v v25, (a0)
; CHECK-NEXT: vlse32.v v8, (a1), zero
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%a = load <4 x float>, <4 x float>* %x
%b = extractelement <4 x float> %a, i32 2
Expand All @@ -40,8 +40,8 @@ define void @gather_const_v2f64(<2 x double>* %x) {
; CHECK-LABEL: gather_const_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vlse64.v v25, (a0), zero
; CHECK-NEXT: vse64.v v25, (a0)
; CHECK-NEXT: vlse64.v v8, (a0), zero
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
%a = load <2 x double>, <2 x double>* %x
%b = extractelement <2 x double> %a, i32 0
Expand Down Expand Up @@ -69,18 +69,18 @@ define void @gather_const_v64f16(<64 x half>* %x) {
; LMULMAX1-NEXT: addi a4, a0, 80
; LMULMAX1-NEXT: addi a5, a0, 94
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; LMULMAX1-NEXT: vlse16.v v25, (a5), zero
; LMULMAX1-NEXT: vlse16.v v8, (a5), zero
; LMULMAX1-NEXT: addi a5, a0, 64
; LMULMAX1-NEXT: addi a1, a0, 112
; LMULMAX1-NEXT: addi a2, a0, 96
; LMULMAX1-NEXT: vse16.v v25, (a2)
; LMULMAX1-NEXT: vse16.v v25, (a1)
; LMULMAX1-NEXT: vse16.v v25, (a5)
; LMULMAX1-NEXT: vse16.v v25, (a4)
; LMULMAX1-NEXT: vse16.v v25, (a3)
; LMULMAX1-NEXT: vse16.v v25, (a7)
; LMULMAX1-NEXT: vse16.v v25, (a0)
; LMULMAX1-NEXT: vse16.v v25, (a6)
; LMULMAX1-NEXT: vse16.v v8, (a2)
; LMULMAX1-NEXT: vse16.v v8, (a1)
; LMULMAX1-NEXT: vse16.v v8, (a5)
; LMULMAX1-NEXT: vse16.v v8, (a4)
; LMULMAX1-NEXT: vse16.v v8, (a3)
; LMULMAX1-NEXT: vse16.v v8, (a7)
; LMULMAX1-NEXT: vse16.v v8, (a0)
; LMULMAX1-NEXT: vse16.v v8, (a6)
; LMULMAX1-NEXT: ret
%a = load <64 x half>, <64 x half>* %x
%b = extractelement <64 x half> %a, i32 47
@@ -108,18 +108,18 @@ define void @gather_const_v32f32(<32 x float>* %x) {
; LMULMAX1-NEXT: addi a4, a0, 80
; LMULMAX1-NEXT: addi a5, a0, 68
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT: vlse32.v v25, (a5), zero
; LMULMAX1-NEXT: vlse32.v v8, (a5), zero
; LMULMAX1-NEXT: addi a5, a0, 64
; LMULMAX1-NEXT: addi a1, a0, 112
; LMULMAX1-NEXT: addi a2, a0, 96
; LMULMAX1-NEXT: vse32.v v25, (a2)
; LMULMAX1-NEXT: vse32.v v25, (a1)
; LMULMAX1-NEXT: vse32.v v25, (a5)
; LMULMAX1-NEXT: vse32.v v25, (a4)
; LMULMAX1-NEXT: vse32.v v25, (a3)
; LMULMAX1-NEXT: vse32.v v25, (a7)
; LMULMAX1-NEXT: vse32.v v25, (a0)
; LMULMAX1-NEXT: vse32.v v25, (a6)
; LMULMAX1-NEXT: vse32.v v8, (a2)
; LMULMAX1-NEXT: vse32.v v8, (a1)
; LMULMAX1-NEXT: vse32.v v8, (a5)
; LMULMAX1-NEXT: vse32.v v8, (a4)
; LMULMAX1-NEXT: vse32.v v8, (a3)
; LMULMAX1-NEXT: vse32.v v8, (a7)
; LMULMAX1-NEXT: vse32.v v8, (a0)
; LMULMAX1-NEXT: vse32.v v8, (a6)
; LMULMAX1-NEXT: ret
%a = load <32 x float>, <32 x float>* %x
%b = extractelement <32 x float> %a, i32 17
@@ -145,18 +145,18 @@ define void @gather_const_v16f64(<16 x double>* %x) {
; LMULMAX1-NEXT: addi a3, a0, 32
; LMULMAX1-NEXT: addi a4, a0, 80
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; LMULMAX1-NEXT: vlse64.v v25, (a4), zero
; LMULMAX1-NEXT: vlse64.v v8, (a4), zero
; LMULMAX1-NEXT: addi a5, a0, 64
; LMULMAX1-NEXT: addi a1, a0, 112
; LMULMAX1-NEXT: addi a2, a0, 96
; LMULMAX1-NEXT: vse64.v v25, (a2)
; LMULMAX1-NEXT: vse64.v v25, (a1)
; LMULMAX1-NEXT: vse64.v v25, (a5)
; LMULMAX1-NEXT: vse64.v v25, (a4)
; LMULMAX1-NEXT: vse64.v v25, (a3)
; LMULMAX1-NEXT: vse64.v v25, (a7)
; LMULMAX1-NEXT: vse64.v v25, (a0)
; LMULMAX1-NEXT: vse64.v v25, (a6)
; LMULMAX1-NEXT: vse64.v v8, (a2)
; LMULMAX1-NEXT: vse64.v v8, (a1)
; LMULMAX1-NEXT: vse64.v v8, (a5)
; LMULMAX1-NEXT: vse64.v v8, (a4)
; LMULMAX1-NEXT: vse64.v v8, (a3)
; LMULMAX1-NEXT: vse64.v v8, (a7)
; LMULMAX1-NEXT: vse64.v v8, (a0)
; LMULMAX1-NEXT: vse64.v v8, (a6)
; LMULMAX1-NEXT: ret
%a = load <16 x double>, <16 x double>* %x
%b = extractelement <16 x double> %a, i32 10
1,152 changes: 576 additions & 576 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll

Large diffs are not rendered by default.

508 changes: 254 additions & 254 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i.ll

Large diffs are not rendered by default.

432 changes: 216 additions & 216 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-i2fp.ll

Large diffs are not rendered by default.

142 changes: 71 additions & 71 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-i1.ll
@@ -8,13 +8,13 @@ define <1 x i1> @insertelt_v1i1(<1 x i1> %x, i1 %elt) nounwind {
; CHECK-LABEL: insertelt_v1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vmerge.vim v25, v25, 1, v0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu
; CHECK-NEXT: vmv.s.x v25, a0
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vand.vi v25, v25, 1
; CHECK-NEXT: vmsne.vi v0, v25, 0
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%y = insertelement <1 x i1> %x, i1 %elt, i64 0
ret <1 x i1> %y
@@ -24,30 +24,30 @@ define <1 x i1> @insertelt_idx_v1i1(<1 x i1> %x, i1 %elt, i32 zeroext %idx) noun
; RV32-LABEL: insertelt_idx_v1i1:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; RV32-NEXT: vmv.s.x v25, a0
; RV32-NEXT: vmv.v.i v26, 0
; RV32-NEXT: vmerge.vim v26, v26, 1, v0
; RV32-NEXT: vmv.s.x v8, a0
; RV32-NEXT: vmv.v.i v9, 0
; RV32-NEXT: vmerge.vim v9, v9, 1, v0
; RV32-NEXT: addi a0, a1, 1
; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; RV32-NEXT: vslideup.vx v26, v25, a1
; RV32-NEXT: vslideup.vx v9, v8, a1
; RV32-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; RV32-NEXT: vand.vi v25, v26, 1
; RV32-NEXT: vmsne.vi v0, v25, 0
; RV32-NEXT: vand.vi v8, v9, 1
; RV32-NEXT: vmsne.vi v0, v8, 0
; RV32-NEXT: ret
;
; RV64-LABEL: insertelt_idx_v1i1:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; RV64-NEXT: vmv.s.x v25, a0
; RV64-NEXT: vmv.v.i v26, 0
; RV64-NEXT: vmerge.vim v26, v26, 1, v0
; RV64-NEXT: vmv.s.x v8, a0
; RV64-NEXT: vmv.v.i v9, 0
; RV64-NEXT: vmerge.vim v9, v9, 1, v0
; RV64-NEXT: sext.w a0, a1
; RV64-NEXT: addi a1, a0, 1
; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
; RV64-NEXT: vslideup.vx v26, v25, a0
; RV64-NEXT: vslideup.vx v9, v8, a0
; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; RV64-NEXT: vand.vi v25, v26, 1
; RV64-NEXT: vmsne.vi v0, v25, 0
; RV64-NEXT: vand.vi v8, v9, 1
; RV64-NEXT: vmsne.vi v0, v8, 0
; RV64-NEXT: ret
%y = insertelement <1 x i1> %x, i1 %elt, i32 %idx
ret <1 x i1> %y
@@ -57,14 +57,14 @@ define <2 x i1> @insertelt_v2i1(<2 x i1> %x, i1 %elt) nounwind {
; CHECK-LABEL: insertelt_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vmv.s.x v25, a0
; CHECK-NEXT: vmv.v.i v26, 0
; CHECK-NEXT: vmerge.vim v26, v26, 1, v0
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu
; CHECK-NEXT: vslideup.vi v26, v25, 1
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vand.vi v25, v26, 1
; CHECK-NEXT: vmsne.vi v0, v25, 0
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%y = insertelement <2 x i1> %x, i1 %elt, i64 1
ret <2 x i1> %y
@@ -74,30 +74,30 @@ define <2 x i1> @insertelt_idx_v2i1(<2 x i1> %x, i1 %elt, i32 zeroext %idx) noun
; RV32-LABEL: insertelt_idx_v2i1:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; RV32-NEXT: vmv.s.x v25, a0
; RV32-NEXT: vmv.v.i v26, 0
; RV32-NEXT: vmerge.vim v26, v26, 1, v0
; RV32-NEXT: vmv.s.x v8, a0
; RV32-NEXT: vmv.v.i v9, 0
; RV32-NEXT: vmerge.vim v9, v9, 1, v0
; RV32-NEXT: addi a0, a1, 1
; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; RV32-NEXT: vslideup.vx v26, v25, a1
; RV32-NEXT: vslideup.vx v9, v8, a1
; RV32-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; RV32-NEXT: vand.vi v25, v26, 1
; RV32-NEXT: vmsne.vi v0, v25, 0
; RV32-NEXT: vand.vi v8, v9, 1
; RV32-NEXT: vmsne.vi v0, v8, 0
; RV32-NEXT: ret
;
; RV64-LABEL: insertelt_idx_v2i1:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; RV64-NEXT: vmv.s.x v25, a0
; RV64-NEXT: vmv.v.i v26, 0
; RV64-NEXT: vmerge.vim v26, v26, 1, v0
; RV64-NEXT: vmv.s.x v8, a0
; RV64-NEXT: vmv.v.i v9, 0
; RV64-NEXT: vmerge.vim v9, v9, 1, v0
; RV64-NEXT: sext.w a0, a1
; RV64-NEXT: addi a1, a0, 1
; RV64-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
; RV64-NEXT: vslideup.vx v26, v25, a0
; RV64-NEXT: vslideup.vx v9, v8, a0
; RV64-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; RV64-NEXT: vand.vi v25, v26, 1
; RV64-NEXT: vmsne.vi v0, v25, 0
; RV64-NEXT: vand.vi v8, v9, 1
; RV64-NEXT: vmsne.vi v0, v8, 0
; RV64-NEXT: ret
%y = insertelement <2 x i1> %x, i1 %elt, i32 %idx
ret <2 x i1> %y
@@ -107,14 +107,14 @@ define <8 x i1> @insertelt_v8i1(<8 x i1> %x, i1 %elt) nounwind {
; CHECK-LABEL: insertelt_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.s.x v25, a0
; CHECK-NEXT: vmv.v.i v26, 0
; CHECK-NEXT: vmerge.vim v26, v26, 1, v0
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v26, v25, 1
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vand.vi v25, v26, 1
; CHECK-NEXT: vmsne.vi v0, v25, 0
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%y = insertelement <8 x i1> %x, i1 %elt, i64 1
ret <8 x i1> %y
@@ -124,30 +124,30 @@ define <8 x i1> @insertelt_idx_v8i1(<8 x i1> %x, i1 %elt, i32 zeroext %idx) noun
; RV32-LABEL: insertelt_idx_v8i1:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV32-NEXT: vmv.s.x v25, a0
; RV32-NEXT: vmv.v.i v26, 0
; RV32-NEXT: vmerge.vim v26, v26, 1, v0
; RV32-NEXT: vmv.s.x v8, a0
; RV32-NEXT: vmv.v.i v9, 0
; RV32-NEXT: vmerge.vim v9, v9, 1, v0
; RV32-NEXT: addi a0, a1, 1
; RV32-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; RV32-NEXT: vslideup.vx v26, v25, a1
; RV32-NEXT: vslideup.vx v9, v8, a1
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV32-NEXT: vand.vi v25, v26, 1
; RV32-NEXT: vmsne.vi v0, v25, 0
; RV32-NEXT: vand.vi v8, v9, 1
; RV32-NEXT: vmsne.vi v0, v8, 0
; RV32-NEXT: ret
;
; RV64-LABEL: insertelt_idx_v8i1:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV64-NEXT: vmv.s.x v25, a0
; RV64-NEXT: vmv.v.i v26, 0
; RV64-NEXT: vmerge.vim v26, v26, 1, v0
; RV64-NEXT: vmv.s.x v8, a0
; RV64-NEXT: vmv.v.i v9, 0
; RV64-NEXT: vmerge.vim v9, v9, 1, v0
; RV64-NEXT: sext.w a0, a1
; RV64-NEXT: addi a1, a0, 1
; RV64-NEXT: vsetvli zero, a1, e8, mf2, tu, mu
; RV64-NEXT: vslideup.vx v26, v25, a0
; RV64-NEXT: vslideup.vx v9, v8, a0
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV64-NEXT: vand.vi v25, v26, 1
; RV64-NEXT: vmsne.vi v0, v25, 0
; RV64-NEXT: vand.vi v8, v9, 1
; RV64-NEXT: vmsne.vi v0, v8, 0
; RV64-NEXT: ret
%y = insertelement <8 x i1> %x, i1 %elt, i32 %idx
ret <8 x i1> %y
@@ -158,14 +158,14 @@ define <64 x i1> @insertelt_v64i1(<64 x i1> %x, i1 %elt) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, zero, 64
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv.s.x v28, a0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
; CHECK-NEXT: vsetivli zero, 2, e8, m4, tu, mu
; CHECK-NEXT: vslideup.vi v8, v28, 1
; CHECK-NEXT: vslideup.vi v12, v8, 1
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vand.vi v28, v8, 1
; CHECK-NEXT: vmsne.vi v0, v28, 0
; CHECK-NEXT: vand.vi v8, v12, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%y = insertelement <64 x i1> %x, i1 %elt, i64 1
ret <64 x i1> %y
@@ -176,31 +176,31 @@ define <64 x i1> @insertelt_idx_v64i1(<64 x i1> %x, i1 %elt, i32 zeroext %idx) n
; RV32: # %bb.0:
; RV32-NEXT: addi a2, zero, 64
; RV32-NEXT: vsetvli zero, a2, e8, m4, ta, mu
; RV32-NEXT: vmv.s.x v28, a0
; RV32-NEXT: vmv.v.i v8, 0
; RV32-NEXT: vmerge.vim v8, v8, 1, v0
; RV32-NEXT: vmv.s.x v8, a0
; RV32-NEXT: vmv.v.i v12, 0
; RV32-NEXT: vmerge.vim v12, v12, 1, v0
; RV32-NEXT: addi a0, a1, 1
; RV32-NEXT: vsetvli zero, a0, e8, m4, tu, mu
; RV32-NEXT: vslideup.vx v8, v28, a1
; RV32-NEXT: vslideup.vx v12, v8, a1
; RV32-NEXT: vsetvli zero, a2, e8, m4, ta, mu
; RV32-NEXT: vand.vi v28, v8, 1
; RV32-NEXT: vmsne.vi v0, v28, 0
; RV32-NEXT: vand.vi v8, v12, 1
; RV32-NEXT: vmsne.vi v0, v8, 0
; RV32-NEXT: ret
;
; RV64-LABEL: insertelt_idx_v64i1:
; RV64: # %bb.0:
; RV64-NEXT: addi a2, zero, 64
; RV64-NEXT: vsetvli zero, a2, e8, m4, ta, mu
; RV64-NEXT: vmv.s.x v28, a0
; RV64-NEXT: vmv.v.i v8, 0
; RV64-NEXT: vmerge.vim v8, v8, 1, v0
; RV64-NEXT: vmv.s.x v8, a0
; RV64-NEXT: vmv.v.i v12, 0
; RV64-NEXT: vmerge.vim v12, v12, 1, v0
; RV64-NEXT: sext.w a0, a1
; RV64-NEXT: addi a1, a0, 1
; RV64-NEXT: vsetvli zero, a1, e8, m4, tu, mu
; RV64-NEXT: vslideup.vx v8, v28, a0
; RV64-NEXT: vslideup.vx v12, v8, a0
; RV64-NEXT: vsetvli zero, a2, e8, m4, ta, mu
; RV64-NEXT: vand.vi v28, v8, 1
; RV64-NEXT: vmsne.vi v0, v28, 0
; RV64-NEXT: vand.vi v8, v12, 1
; RV64-NEXT: vmsne.vi v0, v8, 0
; RV64-NEXT: ret
%y = insertelement <64 x i1> %x, i1 %elt, i32 %idx
ret <64 x i1> %y
240 changes: 120 additions & 120 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll

Large diffs are not rendered by default.

136 changes: 68 additions & 68 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
@@ -9,24 +9,24 @@ define void @insertelt_v4i64(<4 x i64>* %x, i64 %y) {
; RV32-LABEL: insertelt_v4i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vle64.v v26, (a0)
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, mu
; RV32-NEXT: vmv.v.i v28, 0
; RV32-NEXT: vslide1up.vx v30, v28, a2
; RV32-NEXT: vslide1up.vx v28, v30, a1
; RV32-NEXT: vmv.v.i v10, 0
; RV32-NEXT: vslide1up.vx v12, v10, a2
; RV32-NEXT: vslide1up.vx v10, v12, a1
; RV32-NEXT: vsetivli zero, 4, e64, m2, tu, mu
; RV32-NEXT: vslideup.vi v26, v28, 3
; RV32-NEXT: vse64.v v26, (a0)
; RV32-NEXT: vslideup.vi v8, v10, 3
; RV32-NEXT: vse64.v v8, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: insertelt_v4i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV64-NEXT: vle64.v v26, (a0)
; RV64-NEXT: vmv.s.x v28, a1
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vmv.s.x v10, a1
; RV64-NEXT: vsetvli zero, zero, e64, m2, tu, mu
; RV64-NEXT: vslideup.vi v26, v28, 3
; RV64-NEXT: vse64.v v26, (a0)
; RV64-NEXT: vslideup.vi v8, v10, 3
; RV64-NEXT: vse64.v v8, (a0)
; RV64-NEXT: ret
%a = load <4 x i64>, <4 x i64>* %x
%b = insertelement <4 x i64> %a, i64 %y, i32 3
@@ -42,29 +42,29 @@ define void @insertelt_v3i64(<3 x i64>* %x, i64 %y) {
; RV32-LABEL: insertelt_v3i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vle64.v v26, (a0)
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; RV32-NEXT: vmv.v.i v28, 0
; RV32-NEXT: vmv.v.i v10, 0
; RV32-NEXT: vsetivli zero, 2, e64, m2, tu, mu
; RV32-NEXT: vslideup.vi v28, v26, 0
; RV32-NEXT: vslideup.vi v10, v8, 0
; RV32-NEXT: lw a3, 16(a0)
; RV32-NEXT: addi a4, a0, 20
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV32-NEXT: vlse32.v v26, (a4), zero
; RV32-NEXT: vlse32.v v8, (a4), zero
; RV32-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-NEXT: vmv.s.x v26, a3
; RV32-NEXT: vmv.s.x v8, a3
; RV32-NEXT: vsetvli zero, zero, e64, m2, tu, mu
; RV32-NEXT: vslideup.vi v28, v26, 2
; RV32-NEXT: vslideup.vi v10, v8, 2
; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, mu
; RV32-NEXT: vmv.v.i v26, 0
; RV32-NEXT: vslide1up.vx v30, v26, a2
; RV32-NEXT: vslide1up.vx v26, v30, a1
; RV32-NEXT: vmv.v.i v8, 0
; RV32-NEXT: vslide1up.vx v12, v8, a2
; RV32-NEXT: vslide1up.vx v8, v12, a1
; RV32-NEXT: vsetivli zero, 3, e64, m2, tu, mu
; RV32-NEXT: vslideup.vi v28, v26, 2
; RV32-NEXT: vslideup.vi v10, v8, 2
; RV32-NEXT: sw a1, 16(a0)
; RV32-NEXT: sw a2, 20(a0)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vse64.v v28, (a0)
; RV32-NEXT: vse64.v v10, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: insertelt_v3i64:
@@ -81,12 +81,12 @@ define void @insertelt_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: insertelt_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vmv.s.x v26, a1
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmv.s.x v9, a1
; CHECK-NEXT: vsetivli zero, 15, e8, m1, tu, mu
; CHECK-NEXT: vslideup.vi v25, v26, 14
; CHECK-NEXT: vslideup.vi v8, v9, 14
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vse8.v v25, (a0)
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%a = load <16 x i8>, <16 x i8>* %x
%b = insertelement <16 x i8> %a, i8 %y, i32 14
@@ -99,27 +99,27 @@ define void @insertelt_v32i16(<32 x i16>* %x, i16 %y, i32 %idx) {
; RV32: # %bb.0:
; RV32-NEXT: addi a3, zero, 32
; RV32-NEXT: vsetvli zero, a3, e16, m4, ta, mu
; RV32-NEXT: vle16.v v28, (a0)
; RV32-NEXT: vmv.s.x v8, a1
; RV32-NEXT: vle16.v v8, (a0)
; RV32-NEXT: vmv.s.x v12, a1
; RV32-NEXT: addi a1, a2, 1
; RV32-NEXT: vsetvli zero, a1, e16, m4, tu, mu
; RV32-NEXT: vslideup.vx v28, v8, a2
; RV32-NEXT: vslideup.vx v8, v12, a2
; RV32-NEXT: vsetvli zero, a3, e16, m4, ta, mu
; RV32-NEXT: vse16.v v28, (a0)
; RV32-NEXT: vse16.v v8, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: insertelt_v32i16:
; RV64: # %bb.0:
; RV64-NEXT: addi a3, zero, 32
; RV64-NEXT: vsetvli zero, a3, e16, m4, ta, mu
; RV64-NEXT: vle16.v v28, (a0)
; RV64-NEXT: vmv.s.x v8, a1
; RV64-NEXT: vle16.v v8, (a0)
; RV64-NEXT: vmv.s.x v12, a1
; RV64-NEXT: sext.w a1, a2
; RV64-NEXT: addi a2, a1, 1
; RV64-NEXT: vsetvli zero, a2, e16, m4, tu, mu
; RV64-NEXT: vslideup.vx v28, v8, a1
; RV64-NEXT: vslideup.vx v8, v12, a1
; RV64-NEXT: vsetvli zero, a3, e16, m4, ta, mu
; RV64-NEXT: vse16.v v28, (a0)
; RV64-NEXT: vse16.v v8, (a0)
; RV64-NEXT: ret
%a = load <32 x i16>, <32 x i16>* %x
%b = insertelement <32 x i16> %a, i16 %y, i32 %idx
@@ -131,26 +131,26 @@ define void @insertelt_v8f32(<8 x float>* %x, float %y, i32 %idx) {
; RV32-LABEL: insertelt_v8f32:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; RV32-NEXT: vle32.v v26, (a0)
; RV32-NEXT: vfmv.s.f v28, fa0
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: vfmv.s.f v10, fa0
; RV32-NEXT: addi a2, a1, 1
; RV32-NEXT: vsetvli zero, a2, e32, m2, tu, mu
; RV32-NEXT: vslideup.vx v26, v28, a1
; RV32-NEXT: vslideup.vx v8, v10, a1
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; RV32-NEXT: vse32.v v26, (a0)
; RV32-NEXT: vse32.v v8, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: insertelt_v8f32:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; RV64-NEXT: vle32.v v26, (a0)
; RV64-NEXT: vfmv.s.f v28, fa0
; RV64-NEXT: vle32.v v8, (a0)
; RV64-NEXT: vfmv.s.f v10, fa0
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: addi a2, a1, 1
; RV64-NEXT: vsetvli zero, a2, e32, m2, tu, mu
; RV64-NEXT: vslideup.vx v26, v28, a1
; RV64-NEXT: vslideup.vx v8, v10, a1
; RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; RV64-NEXT: vse32.v v26, (a0)
; RV64-NEXT: vse32.v v8, (a0)
; RV64-NEXT: ret
%a = load <8 x float>, <8 x float>* %x
%b = insertelement <8 x float> %a, float %y, i32 %idx
@@ -162,11 +162,11 @@ define void @insertelt_v8i64_0(<8 x i64>* %x) {
; CHECK-LABEL: insertelt_v8i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; CHECK-NEXT: vle64.v v28, (a0)
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: addi a1, zero, -1
; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu
; CHECK-NEXT: vmv.s.x v28, a1
; CHECK-NEXT: vse64.v v28, (a0)
; CHECK-NEXT: vmv.s.x v8, a1
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
%a = load <8 x i64>, <8 x i64>* %x
%b = insertelement <8 x i64> %a, i64 -1, i32 0
@@ -178,28 +178,28 @@ define void @insertelt_v8i64(<8 x i64>* %x, i32 %idx) {
; RV32-LABEL: insertelt_v8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vle64.v v28, (a0)
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: addi a2, zero, -1
; RV32-NEXT: vmv.s.x v8, a2
; RV32-NEXT: vmv.s.x v12, a2
; RV32-NEXT: addi a2, a1, 1
; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu
; RV32-NEXT: vslideup.vx v28, v8, a1
; RV32-NEXT: vslideup.vx v8, v12, a1
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vse64.v v28, (a0)
; RV32-NEXT: vse64.v v8, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: insertelt_v8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vle64.v v28, (a0)
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: addi a2, zero, -1
; RV64-NEXT: vmv.s.x v8, a2
; RV64-NEXT: vmv.s.x v12, a2
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: addi a2, a1, 1
; RV64-NEXT: vsetvli zero, a2, e64, m4, tu, mu
; RV64-NEXT: vslideup.vx v28, v8, a1
; RV64-NEXT: vslideup.vx v8, v12, a1
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vse64.v v28, (a0)
; RV64-NEXT: vse64.v v8, (a0)
; RV64-NEXT: ret
%a = load <8 x i64>, <8 x i64>* %x
%b = insertelement <8 x i64> %a, i64 -1, i32 %idx
@@ -211,11 +211,11 @@ define void @insertelt_c6_v8i64_0(<8 x i64>* %x) {
; CHECK-LABEL: insertelt_c6_v8i64_0:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; CHECK-NEXT: vle64.v v28, (a0)
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: addi a1, zero, 6
; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu
; CHECK-NEXT: vmv.s.x v28, a1
; CHECK-NEXT: vse64.v v28, (a0)
; CHECK-NEXT: vmv.s.x v8, a1
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
%a = load <8 x i64>, <8 x i64>* %x
%b = insertelement <8 x i64> %a, i64 6, i32 0
@@ -227,28 +227,28 @@ define void @insertelt_c6_v8i64(<8 x i64>* %x, i32 %idx) {
; RV32-LABEL: insertelt_c6_v8i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vle64.v v28, (a0)
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: addi a2, zero, 6
; RV32-NEXT: vmv.s.x v8, a2
; RV32-NEXT: vmv.s.x v12, a2
; RV32-NEXT: addi a2, a1, 1
; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu
; RV32-NEXT: vslideup.vx v28, v8, a1
; RV32-NEXT: vslideup.vx v8, v12, a1
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vse64.v v28, (a0)
; RV32-NEXT: vse64.v v8, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: insertelt_c6_v8i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vle64.v v28, (a0)
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: addi a2, zero, 6
; RV64-NEXT: vmv.s.x v8, a2
; RV64-NEXT: vmv.s.x v12, a2
; RV64-NEXT: sext.w a1, a1
; RV64-NEXT: addi a2, a1, 1
; RV64-NEXT: vsetvli zero, a2, e64, m4, tu, mu
; RV64-NEXT: vslideup.vx v28, v8, a1
; RV64-NEXT: vslideup.vx v8, v12, a1
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV64-NEXT: vse64.v v28, (a0)
; RV64-NEXT: vse64.v v8, (a0)
; RV64-NEXT: ret
%a = load <8 x i64>, <8 x i64>* %x
%b = insertelement <8 x i64> %a, i64 6, i32 %idx
@@ -262,14 +262,14 @@ define void @insertelt_c6_v8i64_0_add(<8 x i64>* %x, <8 x i64>* %y) {
; CHECK-LABEL: insertelt_c6_v8i64_0_add:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; CHECK-NEXT: vle64.v v28, (a0)
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: addi a2, zero, 6
; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, mu
; CHECK-NEXT: vmv.s.x v28, a2
; CHECK-NEXT: vmv.s.x v8, a2
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vle64.v v8, (a1)
; CHECK-NEXT: vadd.vv v28, v28, v8
; CHECK-NEXT: vse64.v v28, (a0)
; CHECK-NEXT: vle64.v v12, (a1)
; CHECK-NEXT: vadd.vv v8, v8, v12
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
%a = load <8 x i64>, <8 x i64>* %x
%b = insertelement <8 x i64> %a, i64 6, i32 0
316 changes: 158 additions & 158 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll

Large diffs are not rendered by default.

158 changes: 79 additions & 79 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
@@ -10,10 +10,10 @@ define void @sext_v4i8_v4i32(<4 x i8>* %x, <4 x i32>* %z) {
; CHECK-LABEL: sext_v4i8_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vsext.vf4 v26, v25
; CHECK-NEXT: vse32.v v26, (a1)
; CHECK-NEXT: vsext.vf4 v9, v8
; CHECK-NEXT: vse32.v v9, (a1)
; CHECK-NEXT: ret
%a = load <4 x i8>, <4 x i8>* %x
%b = sext <4 x i8> %a to <4 x i32>
@@ -25,10 +25,10 @@ define void @zext_v4i8_v4i32(<4 x i8>* %x, <4 x i32>* %z) {
; CHECK-LABEL: zext_v4i8_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT: vzext.vf4 v26, v25
; CHECK-NEXT: vse32.v v26, (a1)
; CHECK-NEXT: vzext.vf4 v9, v8
; CHECK-NEXT: vse32.v v9, (a1)
; CHECK-NEXT: ret
%a = load <4 x i8>, <4 x i8>* %x
%b = zext <4 x i8> %a to <4 x i32>
@@ -40,33 +40,33 @@ define void @sext_v8i8_v8i32(<8 x i8>* %x, <8 x i32>* %z) {
; LMULMAX8-LABEL: sext_v8i8_v8i32:
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX8-NEXT: vle8.v v25, (a0)
; LMULMAX8-NEXT: vle8.v v8, (a0)
; LMULMAX8-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; LMULMAX8-NEXT: vsext.vf4 v26, v25
; LMULMAX8-NEXT: vse32.v v26, (a1)
; LMULMAX8-NEXT: vsext.vf4 v10, v8
; LMULMAX8-NEXT: vse32.v v10, (a1)
; LMULMAX8-NEXT: ret
;
; LMULMAX2-LABEL: sext_v8i8_v8i32:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX2-NEXT: vle8.v v25, (a0)
; LMULMAX2-NEXT: vle8.v v8, (a0)
; LMULMAX2-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; LMULMAX2-NEXT: vsext.vf4 v26, v25
; LMULMAX2-NEXT: vse32.v v26, (a1)
; LMULMAX2-NEXT: vsext.vf4 v10, v8
; LMULMAX2-NEXT: vse32.v v10, (a1)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: sext_v8i8_v8i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vle8.v v25, (a0)
; LMULMAX1-NEXT: vle8.v v8, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslidedown.vi v26, v25, 4
; LMULMAX1-NEXT: vslidedown.vi v9, v8, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT: vsext.vf4 v27, v26
; LMULMAX1-NEXT: vsext.vf4 v26, v25
; LMULMAX1-NEXT: vsext.vf4 v10, v9
; LMULMAX1-NEXT: vsext.vf4 v9, v8
; LMULMAX1-NEXT: addi a0, a1, 16
; LMULMAX1-NEXT: vse32.v v27, (a0)
; LMULMAX1-NEXT: vse32.v v26, (a1)
; LMULMAX1-NEXT: vse32.v v10, (a0)
; LMULMAX1-NEXT: vse32.v v9, (a1)
; LMULMAX1-NEXT: ret
%a = load <8 x i8>, <8 x i8>* %x
%b = sext <8 x i8> %a to <8 x i32>
@@ -79,83 +79,83 @@ define void @sext_v32i8_v32i32(<32 x i8>* %x, <32 x i32>* %z) {
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: addi a2, zero, 32
; LMULMAX8-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; LMULMAX8-NEXT: vle8.v v26, (a0)
; LMULMAX8-NEXT: vle8.v v8, (a0)
; LMULMAX8-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; LMULMAX8-NEXT: vsext.vf4 v8, v26
; LMULMAX8-NEXT: vse32.v v8, (a1)
; LMULMAX8-NEXT: vsext.vf4 v16, v8
; LMULMAX8-NEXT: vse32.v v16, (a1)
; LMULMAX8-NEXT: ret
;
; LMULMAX2-LABEL: sext_v32i8_v32i32:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: addi a2, zero, 32
; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; LMULMAX2-NEXT: vle8.v v26, (a0)
; LMULMAX2-NEXT: vle8.v v8, (a0)
; LMULMAX2-NEXT: vsetivli zero, 8, e8, m1, ta, mu
; LMULMAX2-NEXT: vslidedown.vi v25, v26, 8
; LMULMAX2-NEXT: vslidedown.vi v10, v8, 8
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-NEXT: vsext.vf4 v28, v25
; LMULMAX2-NEXT: vsext.vf4 v12, v10
; LMULMAX2-NEXT: vsetivli zero, 16, e8, m2, ta, mu
; LMULMAX2-NEXT: vslidedown.vi v30, v26, 16
; LMULMAX2-NEXT: vslidedown.vi v10, v8, 16
; LMULMAX2-NEXT: vsetivli zero, 8, e8, m1, ta, mu
; LMULMAX2-NEXT: vslidedown.vi v25, v30, 8
; LMULMAX2-NEXT: vslidedown.vi v14, v10, 8
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-NEXT: vsext.vf4 v8, v25
; LMULMAX2-NEXT: vsext.vf4 v10, v26
; LMULMAX2-NEXT: vsext.vf4 v26, v30
; LMULMAX2-NEXT: vsext.vf4 v16, v14
; LMULMAX2-NEXT: vsext.vf4 v14, v8
; LMULMAX2-NEXT: vsext.vf4 v8, v10
; LMULMAX2-NEXT: addi a0, a1, 64
; LMULMAX2-NEXT: vse32.v v26, (a0)
; LMULMAX2-NEXT: vse32.v v10, (a1)
; LMULMAX2-NEXT: addi a0, a1, 96
; LMULMAX2-NEXT: vse32.v v8, (a0)
; LMULMAX2-NEXT: vse32.v v14, (a1)
; LMULMAX2-NEXT: addi a0, a1, 96
; LMULMAX2-NEXT: vse32.v v16, (a0)
; LMULMAX2-NEXT: addi a0, a1, 32
; LMULMAX2-NEXT: vse32.v v28, (a0)
; LMULMAX2-NEXT: vse32.v v12, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: sext_v32i8_v32i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-NEXT: addi a2, a0, 16
; LMULMAX1-NEXT: vle8.v v25, (a2)
; LMULMAX1-NEXT: vle8.v v26, (a0)
; LMULMAX1-NEXT: vle8.v v8, (a2)
; LMULMAX1-NEXT: vle8.v v9, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslidedown.vi v27, v25, 4
; LMULMAX1-NEXT: vslidedown.vi v10, v8, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT: vsext.vf4 v28, v27
; LMULMAX1-NEXT: vsext.vf4 v11, v10
; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu
; LMULMAX1-NEXT: vslidedown.vi v27, v25, 8
; LMULMAX1-NEXT: vslidedown.vi v10, v8, 8
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslidedown.vi v29, v27, 4
; LMULMAX1-NEXT: vslidedown.vi v12, v10, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT: vsext.vf4 v30, v29
; LMULMAX1-NEXT: vsext.vf4 v13, v12
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslidedown.vi v29, v26, 4
; LMULMAX1-NEXT: vslidedown.vi v12, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT: vsext.vf4 v31, v29
; LMULMAX1-NEXT: vsext.vf4 v14, v12
; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, mu
; LMULMAX1-NEXT: vslidedown.vi v29, v26, 8
; LMULMAX1-NEXT: vslidedown.vi v12, v9, 8
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; LMULMAX1-NEXT: vslidedown.vi v8, v29, 4
; LMULMAX1-NEXT: vslidedown.vi v15, v12, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT: vsext.vf4 v9, v8
; LMULMAX1-NEXT: vsext.vf4 v8, v27
; LMULMAX1-NEXT: vsext.vf4 v27, v29
; LMULMAX1-NEXT: vsext.vf4 v29, v25
; LMULMAX1-NEXT: vsext.vf4 v25, v26
; LMULMAX1-NEXT: vsext.vf4 v16, v15
; LMULMAX1-NEXT: vsext.vf4 v15, v10
; LMULMAX1-NEXT: vsext.vf4 v10, v12
; LMULMAX1-NEXT: vsext.vf4 v12, v8
; LMULMAX1-NEXT: vsext.vf4 v8, v9
; LMULMAX1-NEXT: addi a0, a1, 32
; LMULMAX1-NEXT: vse32.v v27, (a0)
; LMULMAX1-NEXT: vse32.v v25, (a1)
; LMULMAX1-NEXT: vse32.v v10, (a0)
; LMULMAX1-NEXT: vse32.v v8, (a1)
; LMULMAX1-NEXT: addi a0, a1, 96
; LMULMAX1-NEXT: vse32.v v8, (a0)
; LMULMAX1-NEXT: vse32.v v15, (a0)
; LMULMAX1-NEXT: addi a0, a1, 64
; LMULMAX1-NEXT: vse32.v v29, (a0)
; LMULMAX1-NEXT: vse32.v v12, (a0)
; LMULMAX1-NEXT: addi a0, a1, 48
; LMULMAX1-NEXT: vse32.v v9, (a0)
; LMULMAX1-NEXT: vse32.v v16, (a0)
; LMULMAX1-NEXT: addi a0, a1, 16
; LMULMAX1-NEXT: vse32.v v31, (a0)
; LMULMAX1-NEXT: vse32.v v14, (a0)
; LMULMAX1-NEXT: addi a0, a1, 112
; LMULMAX1-NEXT: vse32.v v30, (a0)
; LMULMAX1-NEXT: vse32.v v13, (a0)
; LMULMAX1-NEXT: addi a0, a1, 80
; LMULMAX1-NEXT: vse32.v v28, (a0)
; LMULMAX1-NEXT: vse32.v v11, (a0)
; LMULMAX1-NEXT: ret
%a = load <32 x i8>, <32 x i8>* %x
%b = sext <32 x i8> %a to <32 x i32>
@@ -167,12 +167,12 @@ define void @trunc_v4i8_v4i32(<4 x i32>* %x, <4 x i8>* %z) {
; CHECK-LABEL: trunc_v4i8_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v25, v25, 0
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v25, v25, 0
; CHECK-NEXT: vse8.v v25, (a1)
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vse8.v v8, (a1)
; CHECK-NEXT: ret
%a = load <4 x i32>, <4 x i32>* %x
%b = trunc <4 x i32> %a to <4 x i8>
@@ -184,46 +184,46 @@ define void @trunc_v8i8_v8i32(<8 x i32>* %x, <8 x i8>* %z) {
; LMULMAX8-LABEL: trunc_v8i8_v8i32:
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX8-NEXT: vle32.v v26, (a0)
; LMULMAX8-NEXT: vle32.v v8, (a0)
; LMULMAX8-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; LMULMAX8-NEXT: vnsrl.wi v25, v26, 0
; LMULMAX8-NEXT: vnsrl.wi v10, v8, 0
; LMULMAX8-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; LMULMAX8-NEXT: vnsrl.wi v25, v25, 0
; LMULMAX8-NEXT: vse8.v v25, (a1)
; LMULMAX8-NEXT: vnsrl.wi v8, v10, 0
; LMULMAX8-NEXT: vse8.v v8, (a1)
; LMULMAX8-NEXT: ret
;
; LMULMAX2-LABEL: trunc_v8i8_v8i32:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v8, (a0)
; LMULMAX2-NEXT: vsetvli zero, zero, e16, m1, ta, mu
; LMULMAX2-NEXT: vnsrl.wi v25, v26, 0
; LMULMAX2-NEXT: vnsrl.wi v10, v8, 0
; LMULMAX2-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
; LMULMAX2-NEXT: vnsrl.wi v25, v25, 0
; LMULMAX2-NEXT: vse8.v v25, (a1)
; LMULMAX2-NEXT: vnsrl.wi v8, v10, 0
; LMULMAX2-NEXT: vse8.v v8, (a1)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: trunc_v8i8_v8i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT: vle32.v v25, (a0)
; LMULMAX1-NEXT: vle32.v v8, (a0)
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vle32.v v26, (a0)
; LMULMAX1-NEXT: vle32.v v9, (a0)
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, mu
; LMULMAX1-NEXT: vnsrl.wi v25, v25, 0
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; LMULMAX1-NEXT: vnsrl.wi v25, v25, 0
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; LMULMAX1-NEXT: vmv.v.i v27, 0
; LMULMAX1-NEXT: vmv.v.i v10, 0
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v27, v25, 0
; LMULMAX1-NEXT: vslideup.vi v10, v8, 0
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; LMULMAX1-NEXT: vnsrl.wi v25, v26, 0
; LMULMAX1-NEXT: vnsrl.wi v8, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
; LMULMAX1-NEXT: vnsrl.wi v25, v25, 0
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, mu
; LMULMAX1-NEXT: vslideup.vi v27, v25, 4
; LMULMAX1-NEXT: vse8.v v27, (a1)
; LMULMAX1-NEXT: vslideup.vi v10, v8, 4
; LMULMAX1-NEXT: vse8.v v10, (a1)
; LMULMAX1-NEXT: ret
%a = load <8 x i32>, <8 x i32>* %x
%b = trunc <8 x i32> %a to <8 x i8>
326 changes: 163 additions & 163 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-setcc.ll

Large diffs are not rendered by default.

252 changes: 126 additions & 126 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll

Large diffs are not rendered by default.

450 changes: 225 additions & 225 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-splat.ll

Large diffs are not rendered by default.

80 changes: 40 additions & 40 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-vrgather.ll
@@ -9,8 +9,8 @@ define void @gather_const_v16i8(<16 x i8>* %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, 12
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vlse8.v v25, (a1), zero
; CHECK-NEXT: vse8.v v25, (a0)
; CHECK-NEXT: vlse8.v v8, (a1), zero
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%a = load <16 x i8>, <16 x i8>* %x
%b = extractelement <16 x i8> %a, i32 12
@@ -25,8 +25,8 @@ define void @gather_const_v8i16(<8 x i16>* %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, 10
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vlse16.v v25, (a1), zero
; CHECK-NEXT: vse16.v v25, (a0)
; CHECK-NEXT: vlse16.v v8, (a1), zero
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
%a = load <8 x i16>, <8 x i16>* %x
%b = extractelement <8 x i16> %a, i32 5
@@ -41,8 +41,8 @@ define void @gather_const_v4i32(<4 x i32>* %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, 12
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vlse32.v v25, (a1), zero
; CHECK-NEXT: vse32.v v25, (a0)
; CHECK-NEXT: vlse32.v v8, (a1), zero
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
%a = load <4 x i32>, <4 x i32>* %x
%b = extractelement <4 x i32> %a, i32 3
@@ -57,8 +57,8 @@ define void @gather_const_v2i64(<2 x i64>* %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, 8
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vlse64.v v25, (a1), zero
; CHECK-NEXT: vse64.v v25, (a0)
; CHECK-NEXT: vlse64.v v8, (a1), zero
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
%a = load <2 x i64>, <2 x i64>* %x
%b = extractelement <2 x i64> %a, i32 1
@@ -74,21 +74,21 @@ define void @gather_const_v64i8(<64 x i8>* %x) {
; LMULMAX4-NEXT: addi a1, a0, 32
; LMULMAX4-NEXT: addi a2, zero, 64
; LMULMAX4-NEXT: vsetvli zero, a2, e8, m4, ta, mu
; LMULMAX4-NEXT: vlse8.v v28, (a1), zero
; LMULMAX4-NEXT: vse8.v v28, (a0)
; LMULMAX4-NEXT: vlse8.v v8, (a1), zero
; LMULMAX4-NEXT: vse8.v v8, (a0)
; LMULMAX4-NEXT: ret
;
; LMULMAX1-LABEL: gather_const_v64i8:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: addi a1, a0, 32
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-NEXT: vlse8.v v25, (a1), zero
; LMULMAX1-NEXT: vlse8.v v8, (a1), zero
; LMULMAX1-NEXT: addi a2, a0, 16
; LMULMAX1-NEXT: addi a3, a0, 48
; LMULMAX1-NEXT: vse8.v v25, (a1)
; LMULMAX1-NEXT: vse8.v v25, (a3)
; LMULMAX1-NEXT: vse8.v v25, (a0)
; LMULMAX1-NEXT: vse8.v v25, (a2)
; LMULMAX1-NEXT: vse8.v v8, (a1)
; LMULMAX1-NEXT: vse8.v v8, (a3)
; LMULMAX1-NEXT: vse8.v v8, (a0)
; LMULMAX1-NEXT: vse8.v v8, (a2)
; LMULMAX1-NEXT: ret
%a = load <64 x i8>, <64 x i8>* %x
%b = extractelement <64 x i8> %a, i32 32
@@ -104,22 +104,22 @@ define void @gather_const_v16i16(<32 x i16>* %x) {
; LMULMAX4-NEXT: addi a1, a0, 50
; LMULMAX4-NEXT: addi a2, zero, 32
; LMULMAX4-NEXT: vsetvli zero, a2, e16, m4, ta, mu
; LMULMAX4-NEXT: vlse16.v v28, (a1), zero
; LMULMAX4-NEXT: vse16.v v28, (a0)
; LMULMAX4-NEXT: vlse16.v v8, (a1), zero
; LMULMAX4-NEXT: vse16.v v8, (a0)
; LMULMAX4-NEXT: ret
;
; LMULMAX1-LABEL: gather_const_v16i16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: addi a1, a0, 50
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; LMULMAX1-NEXT: vlse16.v v25, (a1), zero
; LMULMAX1-NEXT: vlse16.v v8, (a1), zero
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: addi a2, a0, 48
; LMULMAX1-NEXT: addi a3, a0, 32
; LMULMAX1-NEXT: vse16.v v25, (a3)
; LMULMAX1-NEXT: vse16.v v25, (a2)
; LMULMAX1-NEXT: vse16.v v25, (a0)
; LMULMAX1-NEXT: vse16.v v25, (a1)
; LMULMAX1-NEXT: vse16.v v8, (a3)
; LMULMAX1-NEXT: vse16.v v8, (a2)
; LMULMAX1-NEXT: vse16.v v8, (a0)
; LMULMAX1-NEXT: vse16.v v8, (a1)
; LMULMAX1-NEXT: ret
%a = load <32 x i16>, <32 x i16>* %x
%b = extractelement <32 x i16> %a, i32 25
@@ -134,22 +134,22 @@ define void @gather_const_v16i32(<16 x i32>* %x) {
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: addi a1, a0, 36
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; LMULMAX4-NEXT: vlse32.v v28, (a1), zero
; LMULMAX4-NEXT: vse32.v v28, (a0)
; LMULMAX4-NEXT: vlse32.v v8, (a1), zero
; LMULMAX4-NEXT: vse32.v v8, (a0)
; LMULMAX4-NEXT: ret
;
; LMULMAX1-LABEL: gather_const_v16i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: addi a1, a0, 36
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-NEXT: vlse32.v v25, (a1), zero
; LMULMAX1-NEXT: vlse32.v v8, (a1), zero
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: addi a2, a0, 48
; LMULMAX1-NEXT: addi a3, a0, 32
; LMULMAX1-NEXT: vse32.v v25, (a3)
; LMULMAX1-NEXT: vse32.v v25, (a2)
; LMULMAX1-NEXT: vse32.v v25, (a0)
; LMULMAX1-NEXT: vse32.v v25, (a1)
; LMULMAX1-NEXT: vse32.v v8, (a3)
; LMULMAX1-NEXT: vse32.v v8, (a2)
; LMULMAX1-NEXT: vse32.v v8, (a0)
; LMULMAX1-NEXT: vse32.v v8, (a1)
; LMULMAX1-NEXT: ret
%a = load <16 x i32>, <16 x i32>* %x
%b = extractelement <16 x i32> %a, i32 9
@@ -164,22 +164,22 @@ define void @gather_const_v8i64(<8 x i64>* %x) {
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: addi a1, a0, 24
; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; LMULMAX4-NEXT: vlse64.v v28, (a1), zero
; LMULMAX4-NEXT: vse64.v v28, (a0)
; LMULMAX4-NEXT: vlse64.v v8, (a1), zero
; LMULMAX4-NEXT: vse64.v v8, (a0)
; LMULMAX4-NEXT: ret
;
; LMULMAX1-LABEL: gather_const_v8i64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: addi a1, a0, 24
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; LMULMAX1-NEXT: vlse64.v v25, (a1), zero
; LMULMAX1-NEXT: vlse64.v v8, (a1), zero
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: addi a2, a0, 48
; LMULMAX1-NEXT: addi a3, a0, 32
; LMULMAX1-NEXT: vse64.v v25, (a3)
; LMULMAX1-NEXT: vse64.v v25, (a2)
; LMULMAX1-NEXT: vse64.v v25, (a0)
; LMULMAX1-NEXT: vse64.v v25, (a1)
; LMULMAX1-NEXT: vse64.v v8, (a3)
; LMULMAX1-NEXT: vse64.v v8, (a2)
; LMULMAX1-NEXT: vse64.v v8, (a0)
; LMULMAX1-NEXT: vse64.v v8, (a1)
; LMULMAX1-NEXT: ret
%a = load <8 x i64>, <8 x i64>* %x
%b = extractelement <8 x i64> %a, i32 3
@@ -194,8 +194,8 @@ define void @splat_concat_low(<4 x i16>* %x, <4 x i16>* %y, <8 x i16>* %z) {
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, a0, 2
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vlse16.v v25, (a0), zero
; CHECK-NEXT: vse16.v v25, (a2)
; CHECK-NEXT: vlse16.v v8, (a0), zero
; CHECK-NEXT: vse16.v v8, (a2)
; CHECK-NEXT: ret
%a = load <4 x i16>, <4 x i16>* %x
%b = load <4 x i16>, <4 x i16>* %y
@@ -210,8 +210,8 @@ define void @splat_concat_high(<4 x i16>* %x, <4 x i16>* %y, <8 x i16>* %z) {
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, a1, 2
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vlse16.v v25, (a0), zero
; CHECK-NEXT: vse16.v v25, (a2)
; CHECK-NEXT: vlse16.v v8, (a0), zero
; CHECK-NEXT: vse16.v v8, (a2)
; CHECK-NEXT: ret
%a = load <4 x i16>, <4 x i16>* %x
%b = load <4 x i16>, <4 x i16>* %y
5,024 changes: 2,512 additions & 2,512 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll

Large diffs are not rendered by default.

100 changes: 50 additions & 50 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
@@ -13,8 +13,8 @@ define <1 x i1> @buildvec_mask_nonconst_v1i1(i1 %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.x v25, a0
; CHECK-NEXT: vmsne.vi v0, v25, 0
; CHECK-NEXT: vmv.v.x v8, a0
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%1 = insertelement <1 x i1> undef, i1 %x, i32 0
ret <1 x i1> %1
@@ -25,8 +25,8 @@ define <1 x i1> @buildvec_mask_optsize_nonconst_v1i1(i1 %x) optsize {
; CHECK: # %bb.0:
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.x v25, a0
; CHECK-NEXT: vmsne.vi v0, v25, 0
; CHECK-NEXT: vmv.v.x v8, a0
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%1 = insertelement <1 x i1> undef, i1 %x, i32 0
ret <1 x i1> %1
@@ -36,12 +36,12 @@ define <2 x i1> @buildvec_mask_nonconst_v2i1(i1 %x, i1 %y) {
; CHECK-LABEL: buildvec_mask_nonconst_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu
; CHECK-NEXT: vmv.s.x v25, a0
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vand.vi v25, v25, 1
; CHECK-NEXT: vmsne.vi v0, v25, 0
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%1 = insertelement <2 x i1> undef, i1 %x, i32 0
%2 = insertelement <2 x i1> %1, i1 %y, i32 1
@@ -58,9 +58,9 @@ define <2 x i1> @buildvec_mask_optsize_nonconst_v2i1(i1 %x, i1 %y) optsize {
; CHECK-NEXT: sb a0, 14(sp)
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: addi a0, sp, 14
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vand.vi v25, v25, 1
; CHECK-NEXT: vmsne.vi v0, v25, 0
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%1 = insertelement <2 x i1> undef, i1 %x, i32 0
@@ -105,10 +105,10 @@ define <4 x i1> @buildvec_mask_nonconst_v4i1(i1 %x, i1 %y) {
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT: vmv.s.x v0, a2
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: vmerge.vxm v25, v25, a0, v0
; CHECK-NEXT: vand.vi v25, v25, 1
; CHECK-NEXT: vmsne.vi v0, v25, 0
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%1 = insertelement <4 x i1> undef, i1 %x, i32 0
%2 = insertelement <4 x i1> %1, i1 %x, i32 1
@@ -129,9 +129,9 @@ define <4 x i1> @buildvec_mask_optsize_nonconst_v4i1(i1 %x, i1 %y) optsize {
; CHECK-NEXT: sb a0, 12(sp)
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: addi a0, sp, 12
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vand.vi v25, v25, 1
; CHECK-NEXT: vmsne.vi v0, v25, 0
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%1 = insertelement <4 x i1> undef, i1 %x, i32 0
@@ -153,9 +153,9 @@ define <4 x i1> @buildvec_mask_nonconst_v4i1_2(i1 %x, i1 %y) {
; CHECK-NEXT: sb zero, 12(sp)
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: addi a0, sp, 12
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vand.vi v25, v25, 1
; CHECK-NEXT: vmsne.vi v0, v25, 0
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%1 = insertelement <4 x i1> undef, i1 0, i32 0
@@ -182,10 +182,10 @@ define <8 x i1> @buildvec_mask_nonconst_v8i1(i1 %x, i1 %y) {
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT: vmv.s.x v0, a2
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: vmerge.vxm v25, v25, a0, v0
; CHECK-NEXT: vand.vi v25, v25, 1
; CHECK-NEXT: vmsne.vi v0, v25, 0
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%1 = insertelement <8 x i1> undef, i1 %x, i32 0
%2 = insertelement <8 x i1> %1, i1 %x, i32 1
@@ -214,9 +214,9 @@ define <8 x i1> @buildvec_mask_nonconst_v8i1_2(i1 %x, i1 %y, i1 %z, i1 %w) {
; CHECK-NEXT: sb a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vand.vi v25, v25, 1
; CHECK-NEXT: vmsne.vi v0, v25, 0
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%1 = insertelement <8 x i1> undef, i1 %x, i32 0
@@ -246,9 +246,9 @@ define <8 x i1> @buildvec_mask_optsize_nonconst_v8i1_2(i1 %x, i1 %y, i1 %z, i1 %
; CHECK-NEXT: sb a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vand.vi v25, v25, 1
; CHECK-NEXT: vmsne.vi v0, v25, 0
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%1 = insertelement <8 x i1> undef, i1 %x, i32 0
@@ -277,9 +277,9 @@ define <8 x i1> @buildvec_mask_optsize_nonconst_v8i1(i1 %x, i1 %y) optsize {
; CHECK-NEXT: sb a0, 8(sp)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vand.vi v25, v25, 1
; CHECK-NEXT: vmsne.vi v0, v25, 0
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%1 = insertelement <8 x i1> undef, i1 %x, i32 0
@@ -459,12 +459,12 @@ define <64 x i1> @buildvec_mask_v64i1() {
; RV32-LMULMAX4-NEXT: lui a0, 748388
; RV32-LMULMAX4-NEXT: addi a0, a0, -1793
; RV32-LMULMAX4-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; RV32-LMULMAX4-NEXT: vmv.s.x v25, a0
; RV32-LMULMAX4-NEXT: vmv.s.x v8, a0
; RV32-LMULMAX4-NEXT: lui a0, 748384
; RV32-LMULMAX4-NEXT: addi a0, a0, 1776
; RV32-LMULMAX4-NEXT: vmv.s.x v0, a0
; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
; RV32-LMULMAX4-NEXT: vslideup.vi v0, v25, 1
; RV32-LMULMAX4-NEXT: vslideup.vi v0, v8, 1
; RV32-LMULMAX4-NEXT: ret
;
; RV64-LMULMAX4-LABEL: buildvec_mask_v64i1:
@@ -486,12 +486,12 @@ define <64 x i1> @buildvec_mask_v64i1() {
; RV32-LMULMAX8-NEXT: lui a0, 748388
; RV32-LMULMAX8-NEXT: addi a0, a0, -1793
; RV32-LMULMAX8-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; RV32-LMULMAX8-NEXT: vmv.s.x v25, a0
; RV32-LMULMAX8-NEXT: vmv.s.x v8, a0
; RV32-LMULMAX8-NEXT: lui a0, 748384
; RV32-LMULMAX8-NEXT: addi a0, a0, 1776
; RV32-LMULMAX8-NEXT: vmv.s.x v0, a0
; RV32-LMULMAX8-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
; RV32-LMULMAX8-NEXT: vslideup.vi v0, v25, 1
; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 1
; RV32-LMULMAX8-NEXT: ret
;
; RV64-LMULMAX8-LABEL: buildvec_mask_v64i1:
@@ -594,21 +594,21 @@ define <128 x i1> @buildvec_mask_v128i1() {
; RV32-LMULMAX4-NEXT: lui a0, 748388
; RV32-LMULMAX4-NEXT: addi a0, a0, -1793
; RV32-LMULMAX4-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
; RV32-LMULMAX4-NEXT: vmv.s.x v25, a0
; RV32-LMULMAX4-NEXT: vmv.s.x v8, a0
; RV32-LMULMAX4-NEXT: lui a0, 748384
; RV32-LMULMAX4-NEXT: addi a0, a0, 1776
; RV32-LMULMAX4-NEXT: vmv.s.x v0, a0
; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
; RV32-LMULMAX4-NEXT: vslideup.vi v0, v25, 1
; RV32-LMULMAX4-NEXT: vslideup.vi v0, v8, 1
; RV32-LMULMAX4-NEXT: lui a0, 945060
; RV32-LMULMAX4-NEXT: addi a0, a0, -1793
; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; RV32-LMULMAX4-NEXT: vmv.s.x v25, a0
; RV32-LMULMAX4-NEXT: vmv.s.x v9, a0
; RV32-LMULMAX4-NEXT: lui a0, 551776
; RV32-LMULMAX4-NEXT: addi a0, a0, 1776
; RV32-LMULMAX4-NEXT: vmv.s.x v8, a0
; RV32-LMULMAX4-NEXT: vsetvli zero, zero, e32, mf2, tu, mu
; RV32-LMULMAX4-NEXT: vslideup.vi v8, v25, 1
; RV32-LMULMAX4-NEXT: vslideup.vi v8, v9, 1
; RV32-LMULMAX4-NEXT: ret
;
; RV64-LMULMAX4-LABEL: buildvec_mask_v128i1:
@@ -637,24 +637,24 @@ define <128 x i1> @buildvec_mask_v128i1() {
; RV32-LMULMAX8-NEXT: lui a0, 748388
; RV32-LMULMAX8-NEXT: addi a0, a0, -1793
; RV32-LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV32-LMULMAX8-NEXT: vmv.s.x v25, a0
; RV32-LMULMAX8-NEXT: vmv.s.x v8, a0
; RV32-LMULMAX8-NEXT: lui a0, 748384
; RV32-LMULMAX8-NEXT: addi a0, a0, 1776
; RV32-LMULMAX8-NEXT: vmv.s.x v0, a0
; RV32-LMULMAX8-NEXT: vsetivli zero, 2, e32, m1, tu, mu
; RV32-LMULMAX8-NEXT: vslideup.vi v0, v25, 1
; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 1
; RV32-LMULMAX8-NEXT: lui a0, 551776
; RV32-LMULMAX8-NEXT: addi a0, a0, 1776
; RV32-LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV32-LMULMAX8-NEXT: vmv.s.x v25, a0
; RV32-LMULMAX8-NEXT: vmv.s.x v8, a0
; RV32-LMULMAX8-NEXT: vsetivli zero, 3, e32, m1, tu, mu
; RV32-LMULMAX8-NEXT: vslideup.vi v0, v25, 2
; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 2
; RV32-LMULMAX8-NEXT: lui a0, 945060
; RV32-LMULMAX8-NEXT: addi a0, a0, -1793
; RV32-LMULMAX8-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV32-LMULMAX8-NEXT: vmv.s.x v25, a0
; RV32-LMULMAX8-NEXT: vmv.s.x v8, a0
; RV32-LMULMAX8-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; RV32-LMULMAX8-NEXT: vslideup.vi v0, v25, 3
; RV32-LMULMAX8-NEXT: vslideup.vi v0, v8, 3
; RV32-LMULMAX8-NEXT: ret
;
; RV64-LMULMAX8-LABEL: buildvec_mask_v128i1:
@@ -666,7 +666,7 @@ define <128 x i1> @buildvec_mask_v128i1() {
; RV64-LMULMAX8-NEXT: slli a0, a0, 17
; RV64-LMULMAX8-NEXT: addi a0, a0, 1776
; RV64-LMULMAX8-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-LMULMAX8-NEXT: vmv.s.x v25, a0
; RV64-LMULMAX8-NEXT: vmv.s.x v8, a0
; RV64-LMULMAX8-NEXT: lui a0, 1048429
; RV64-LMULMAX8-NEXT: addiw a0, a0, 1735
; RV64-LMULMAX8-NEXT: slli a0, a0, 13
@@ -677,7 +677,7 @@ define <128 x i1> @buildvec_mask_v128i1() {
; RV64-LMULMAX8-NEXT: addi a0, a0, 1776
; RV64-LMULMAX8-NEXT: vmv.s.x v0, a0
; RV64-LMULMAX8-NEXT: vsetvli zero, zero, e64, m1, tu, mu
; RV64-LMULMAX8-NEXT: vslideup.vi v0, v25, 1
; RV64-LMULMAX8-NEXT: vslideup.vi v0, v8, 1
; RV64-LMULMAX8-NEXT: ret
ret <128 x i1> <i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 0, i1 1, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 1, i1 1, i1 1>
}
48 changes: 24 additions & 24 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-load-store.ll
@@ -9,15 +9,15 @@ define void @load_store_v1i1(<1 x i1>* %x, <1 x i1>* %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT: vlm.v v0, (a0)
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vmerge.vim v25, v25, 1, v0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v26, 0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v26, v25, 0
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v25, v26, 0
; CHECK-NEXT: vsm.v v25, (a1)
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <1 x i1>, <1 x i1>* %x
store <1 x i1> %a, <1 x i1>* %y
@@ -29,15 +29,15 @@ define void @load_store_v2i1(<2 x i1>* %x, <2 x i1>* %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vlm.v v0, (a0)
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vmerge.vim v25, v25, 1, v0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v26, 0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v26, v25, 0
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v25, v26, 0
; CHECK-NEXT: vsm.v v25, (a1)
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <2 x i1>, <2 x i1>* %x
store <2 x i1> %a, <2 x i1>* %y
@@ -49,15 +49,15 @@ define void @load_store_v4i1(<4 x i1>* %x, <4 x i1>* %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vlm.v v0, (a0)
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vmerge.vim v25, v25, 1, v0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v26, 0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v26, v25, 0
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v25, v26, 0
; CHECK-NEXT: vsm.v v25, (a1)
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <4 x i1>, <4 x i1>* %x
store <4 x i1> %a, <4 x i1>* %y
@@ -68,8 +68,8 @@ define void @load_store_v8i1(<8 x i1>* %x, <8 x i1>* %y) {
; CHECK-LABEL: load_store_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vlm.v v25, (a0)
; CHECK-NEXT: vsm.v v25, (a1)
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <8 x i1>, <8 x i1>* %x
store <8 x i1> %a, <8 x i1>* %y
@@ -80,8 +80,8 @@ define void @load_store_v16i1(<16 x i1>* %x, <16 x i1>* %y) {
; CHECK-LABEL: load_store_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vlm.v v25, (a0)
; CHECK-NEXT: vsm.v v25, (a1)
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vsm.v v8, (a1)
; CHECK-NEXT: ret
%a = load <16 x i1>, <16 x i1>* %x
store <16 x i1> %a, <16 x i1>* %y
@@ -93,8 +93,8 @@ define void @load_store_v32i1(<32 x i1>* %x, <32 x i1>* %y) {
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: addi a2, zero, 32
; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; LMULMAX2-NEXT: vlm.v v25, (a0)
; LMULMAX2-NEXT: vsm.v v25, (a1)
; LMULMAX2-NEXT: vlm.v v8, (a0)
; LMULMAX2-NEXT: vsm.v v8, (a1)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-RV32-LABEL: load_store_v32i1:
78 changes: 39 additions & 39 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-logic.ll
@@ -8,10 +8,10 @@ define void @and_v8i1(<8 x i1>* %x, <8 x i1>* %y) {
; CHECK-LABEL: and_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vlm.v v25, (a0)
; CHECK-NEXT: vlm.v v26, (a1)
; CHECK-NEXT: vmand.mm v25, v25, v26
; CHECK-NEXT: vsm.v v25, (a0)
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vlm.v v9, (a1)
; CHECK-NEXT: vmand.mm v8, v8, v9
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%a = load <8 x i1>, <8 x i1>* %x
%b = load <8 x i1>, <8 x i1>* %y
@@ -24,10 +24,10 @@ define void @or_v16i1(<16 x i1>* %x, <16 x i1>* %y) {
; CHECK-LABEL: or_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vlm.v v25, (a0)
; CHECK-NEXT: vlm.v v26, (a1)
; CHECK-NEXT: vmor.mm v25, v25, v26
; CHECK-NEXT: vsm.v v25, (a0)
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vlm.v v9, (a1)
; CHECK-NEXT: vmor.mm v8, v8, v9
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%a = load <16 x i1>, <16 x i1>* %x
%b = load <16 x i1>, <16 x i1>* %y
@@ -41,10 +41,10 @@ define void @xor_v32i1(<32 x i1>* %x, <32 x i1>* %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: addi a2, zero, 32
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT: vlm.v v25, (a0)
; CHECK-NEXT: vlm.v v26, (a1)
; CHECK-NEXT: vmxor.mm v25, v25, v26
; CHECK-NEXT: vsm.v v25, (a0)
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vlm.v v9, (a1)
; CHECK-NEXT: vmxor.mm v8, v8, v9
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%a = load <32 x i1>, <32 x i1>* %x
%b = load <32 x i1>, <32 x i1>* %y
@@ -58,9 +58,9 @@ define void @not_v64i1(<64 x i1>* %x, <64 x i1>* %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, zero, 64
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vlm.v v25, (a0)
; CHECK-NEXT: vmnand.mm v25, v25, v25
; CHECK-NEXT: vsm.v v25, (a0)
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vmnand.mm v8, v8, v8
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%a = load <64 x i1>, <64 x i1>* %x
%b = load <64 x i1>, <64 x i1>* %y
@@ -73,10 +73,10 @@ define void @andnot_v8i1(<8 x i1>* %x, <8 x i1>* %y) {
; CHECK-LABEL: andnot_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vlm.v v25, (a0)
; CHECK-NEXT: vlm.v v26, (a1)
; CHECK-NEXT: vmandnot.mm v25, v26, v25
; CHECK-NEXT: vsm.v v25, (a0)
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vlm.v v9, (a1)
; CHECK-NEXT: vmandnot.mm v8, v9, v8
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%a = load <8 x i1>, <8 x i1>* %x
%b = load <8 x i1>, <8 x i1>* %y
@@ -90,10 +90,10 @@ define void @ornot_v16i1(<16 x i1>* %x, <16 x i1>* %y) {
; CHECK-LABEL: ornot_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vlm.v v25, (a0)
; CHECK-NEXT: vlm.v v26, (a1)
; CHECK-NEXT: vmornot.mm v25, v26, v25
; CHECK-NEXT: vsm.v v25, (a0)
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vlm.v v9, (a1)
; CHECK-NEXT: vmornot.mm v8, v9, v8
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%a = load <16 x i1>, <16 x i1>* %x
%b = load <16 x i1>, <16 x i1>* %y
@@ -108,10 +108,10 @@ define void @xornot_v32i1(<32 x i1>* %x, <32 x i1>* %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: addi a2, zero, 32
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT: vlm.v v25, (a0)
; CHECK-NEXT: vlm.v v26, (a1)
; CHECK-NEXT: vmxnor.mm v25, v25, v26
; CHECK-NEXT: vsm.v v25, (a0)
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vlm.v v9, (a1)
; CHECK-NEXT: vmxnor.mm v8, v8, v9
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%a = load <32 x i1>, <32 x i1>* %x
%b = load <32 x i1>, <32 x i1>* %y
@@ -125,10 +125,10 @@ define void @nand_v8i1(<8 x i1>* %x, <8 x i1>* %y) {
; CHECK-LABEL: nand_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vlm.v v25, (a0)
; CHECK-NEXT: vlm.v v26, (a1)
; CHECK-NEXT: vmnand.mm v25, v25, v26
; CHECK-NEXT: vsm.v v25, (a0)
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vlm.v v9, (a1)
; CHECK-NEXT: vmnand.mm v8, v8, v9
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%a = load <8 x i1>, <8 x i1>* %x
%b = load <8 x i1>, <8 x i1>* %y
@@ -142,10 +142,10 @@ define void @nor_v16i1(<16 x i1>* %x, <16 x i1>* %y) {
; CHECK-LABEL: nor_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vlm.v v25, (a0)
; CHECK-NEXT: vlm.v v26, (a1)
; CHECK-NEXT: vmnor.mm v25, v25, v26
; CHECK-NEXT: vsm.v v25, (a0)
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vlm.v v9, (a1)
; CHECK-NEXT: vmnor.mm v8, v8, v9
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%a = load <16 x i1>, <16 x i1>* %x
%b = load <16 x i1>, <16 x i1>* %y
@@ -160,10 +160,10 @@ define void @xnor_v32i1(<32 x i1>* %x, <32 x i1>* %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: addi a2, zero, 32
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT: vlm.v v25, (a0)
; CHECK-NEXT: vlm.v v26, (a1)
; CHECK-NEXT: vmxnor.mm v25, v25, v26
; CHECK-NEXT: vsm.v v25, (a0)
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vlm.v v9, (a1)
; CHECK-NEXT: vmxnor.mm v8, v8, v9
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%a = load <32 x i1>, <32 x i1>* %x
%b = load <32 x i1>, <32 x i1>* %y
200 changes: 100 additions & 100 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
@@ -9,15 +9,15 @@ define void @splat_ones_v1i1(<1 x i1>* %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vmerge.vim v25, v25, 1, v0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v26, 0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v26, v25, 0
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v25, v26, 0
; CHECK-NEXT: vsm.v v25, (a0)
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
store <1 x i1> <i1 1>, <1 x i1>* %x
ret void
@@ -28,15 +28,15 @@ define void @splat_zeros_v2i1(<2 x i1>* %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vmclr.m v0
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vmerge.vim v25, v25, 1, v0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v26, 0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v26, v25, 0
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v25, v26, 0
; CHECK-NEXT: vsm.v v25, (a0)
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
store <2 x i1> zeroinitializer, <2 x i1>* %x
ret void
@@ -47,17 +47,17 @@ define void @splat_v1i1(<1 x i1>* %x, i1 %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: vmsne.vi v0, v25, 0
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vmerge.vim v25, v25, 1, v0
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v26, 0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v26, v25, 0
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v25, v26, 0
; CHECK-NEXT: vsm.v v25, (a0)
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <1 x i1> undef, i1 %y, i32 0
%b = shufflevector <1 x i1> %a, <1 x i1> undef, <1 x i32> zeroinitializer
@@ -71,17 +71,17 @@ define void @splat_v1i1_icmp(<1 x i1>* %x, i32 signext %y, i32 signext %z) {
; CHECK-NEXT: xor a1, a1, a2
; CHECK-NEXT: seqz a1, a1
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: vmsne.vi v0, v25, 0
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vmerge.vim v25, v25, 1, v0
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v26, 0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 1, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v26, v25, 0
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v25, v26, 0
; CHECK-NEXT: vsm.v v25, (a0)
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%c = icmp eq i32 %y, %z
%a = insertelement <1 x i1> undef, i1 %c, i32 0
@@ -95,15 +95,15 @@ define void @splat_ones_v4i1(<4 x i1>* %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vmerge.vim v25, v25, 1, v0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v26, 0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v26, v25, 0
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v25, v26, 0
; CHECK-NEXT: vsm.v v25, (a0)
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
store <4 x i1> <i1 1, i1 1, i1 1, i1 1>, <4 x i1>* %x
ret void
@@ -114,17 +114,17 @@ define void @splat_v4i1(<4 x i1>* %x, i1 %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: vmsne.vi v0, v25, 0
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vmerge.vim v25, v25, 1, v0
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.i v26, 0
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, mu
; CHECK-NEXT: vslideup.vi v26, v25, 0
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v25, v26, 0
; CHECK-NEXT: vsm.v v25, (a0)
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <4 x i1> undef, i1 %y, i32 0
%b = shufflevector <4 x i1> %a, <4 x i1> undef, <4 x i32> zeroinitializer
@@ -136,8 +136,8 @@ define void @splat_zeros_v8i1(<8 x i1>* %x) {
; CHECK-LABEL: splat_zeros_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmclr.m v25
; CHECK-NEXT: vsm.v v25, (a0)
; CHECK-NEXT: vmclr.m v8
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
store <8 x i1> zeroinitializer, <8 x i1>* %x
ret void
@@ -148,9 +148,9 @@ define void @splat_v8i1(<8 x i1>* %x, i1 %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: vmsne.vi v25, v25, 0
; CHECK-NEXT: vsm.v v25, (a0)
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: vmsne.vi v8, v8, 0
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <8 x i1> undef, i1 %y, i32 0
%b = shufflevector <8 x i1> %a, <8 x i1> undef, <8 x i32> zeroinitializer
@@ -162,8 +162,8 @@ define void @splat_ones_v16i1(<16 x i1>* %x) {
; CHECK-LABEL: splat_ones_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vmset.m v25
; CHECK-NEXT: vsm.v v25, (a0)
; CHECK-NEXT: vmset.m v8
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
store <16 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, <16 x i1>* %x
ret void
@@ -174,9 +174,9 @@ define void @splat_v16i1(<16 x i1>* %x, i1 %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: vmsne.vi v25, v25, 0
; CHECK-NEXT: vsm.v v25, (a0)
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: vmsne.vi v8, v8, 0
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%a = insertelement <16 x i1> undef, i1 %y, i32 0
%b = shufflevector <16 x i1> %a, <16 x i1> undef, <16 x i32> zeroinitializer
@@ -189,26 +189,26 @@ define void @splat_zeros_v32i1(<32 x i1>* %x) {
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: addi a1, zero, 32
; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; LMULMAX2-NEXT: vmclr.m v25
; LMULMAX2-NEXT: vsm.v v25, (a0)
; LMULMAX2-NEXT: vmclr.m v8
; LMULMAX2-NEXT: vsm.v v8, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-RV32-LABEL: splat_zeros_v32i1:
; LMULMAX1-RV32: # %bb.0:
; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-RV32-NEXT: vmclr.m v25
; LMULMAX1-RV32-NEXT: vsm.v v25, (a0)
; LMULMAX1-RV32-NEXT: vmclr.m v8
; LMULMAX1-RV32-NEXT: vsm.v v8, (a0)
; LMULMAX1-RV32-NEXT: addi a0, a0, 2
; LMULMAX1-RV32-NEXT: vsm.v v25, (a0)
; LMULMAX1-RV32-NEXT: vsm.v v8, (a0)
; LMULMAX1-RV32-NEXT: ret
;
; LMULMAX1-RV64-LABEL: splat_zeros_v32i1:
; LMULMAX1-RV64: # %bb.0:
; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-RV64-NEXT: vmclr.m v25
; LMULMAX1-RV64-NEXT: vsm.v v25, (a0)
; LMULMAX1-RV64-NEXT: vmclr.m v8
; LMULMAX1-RV64-NEXT: vsm.v v8, (a0)
; LMULMAX1-RV64-NEXT: addi a0, a0, 2
; LMULMAX1-RV64-NEXT: vsm.v v25, (a0)
; LMULMAX1-RV64-NEXT: vsm.v v8, (a0)
; LMULMAX1-RV64-NEXT: ret
store <32 x i1> zeroinitializer, <32 x i1>* %x
ret void
@@ -220,31 +220,31 @@ define void @splat_v32i1(<32 x i1>* %x, i1 %y) {
; LMULMAX2-NEXT: andi a1, a1, 1
; LMULMAX2-NEXT: addi a2, zero, 32
; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; LMULMAX2-NEXT: vmv.v.x v26, a1
; LMULMAX2-NEXT: vmsne.vi v25, v26, 0
; LMULMAX2-NEXT: vsm.v v25, (a0)
; LMULMAX2-NEXT: vmv.v.x v8, a1
; LMULMAX2-NEXT: vmsne.vi v10, v8, 0
; LMULMAX2-NEXT: vsm.v v10, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-RV32-LABEL: splat_v32i1:
; LMULMAX1-RV32: # %bb.0:
; LMULMAX1-RV32-NEXT: andi a1, a1, 1
; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-RV32-NEXT: vmv.v.x v25, a1
; LMULMAX1-RV32-NEXT: vmsne.vi v25, v25, 0
; LMULMAX1-RV32-NEXT: vmv.v.x v8, a1
; LMULMAX1-RV32-NEXT: vmsne.vi v8, v8, 0
; LMULMAX1-RV32-NEXT: addi a1, a0, 2
; LMULMAX1-RV32-NEXT: vsm.v v25, (a1)
; LMULMAX1-RV32-NEXT: vsm.v v25, (a0)
; LMULMAX1-RV32-NEXT: vsm.v v8, (a1)
; LMULMAX1-RV32-NEXT: vsm.v v8, (a0)
; LMULMAX1-RV32-NEXT: ret
;
; LMULMAX1-RV64-LABEL: splat_v32i1:
; LMULMAX1-RV64: # %bb.0:
; LMULMAX1-RV64-NEXT: andi a1, a1, 1
; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-RV64-NEXT: vmv.v.x v25, a1
; LMULMAX1-RV64-NEXT: vmsne.vi v25, v25, 0
; LMULMAX1-RV64-NEXT: vmv.v.x v8, a1
; LMULMAX1-RV64-NEXT: vmsne.vi v8, v8, 0
; LMULMAX1-RV64-NEXT: addi a1, a0, 2
; LMULMAX1-RV64-NEXT: vsm.v v25, (a1)
; LMULMAX1-RV64-NEXT: vsm.v v25, (a0)
; LMULMAX1-RV64-NEXT: vsm.v v8, (a1)
; LMULMAX1-RV64-NEXT: vsm.v v8, (a0)
; LMULMAX1-RV64-NEXT: ret
%a = insertelement <32 x i1> undef, i1 %y, i32 0
%b = shufflevector <32 x i1> %a, <32 x i1> undef, <32 x i32> zeroinitializer
@@ -258,35 +258,35 @@ define void @splat_ones_v64i1(<64 x i1>* %x) {
; LMULMAX2-NEXT: addi a1, a0, 4
; LMULMAX2-NEXT: addi a2, zero, 32
; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; LMULMAX2-NEXT: vmset.m v25
; LMULMAX2-NEXT: vsm.v v25, (a1)
; LMULMAX2-NEXT: vsm.v v25, (a0)
; LMULMAX2-NEXT: vmset.m v8
; LMULMAX2-NEXT: vsm.v v8, (a1)
; LMULMAX2-NEXT: vsm.v v8, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-RV32-LABEL: splat_ones_v64i1:
; LMULMAX1-RV32: # %bb.0:
; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-RV32-NEXT: vmset.m v25
; LMULMAX1-RV32-NEXT: vsm.v v25, (a0)
; LMULMAX1-RV32-NEXT: vmset.m v8
; LMULMAX1-RV32-NEXT: vsm.v v8, (a0)
; LMULMAX1-RV32-NEXT: addi a1, a0, 6
; LMULMAX1-RV32-NEXT: vsm.v v25, (a1)
; LMULMAX1-RV32-NEXT: vsm.v v8, (a1)
; LMULMAX1-RV32-NEXT: addi a1, a0, 4
; LMULMAX1-RV32-NEXT: vsm.v v25, (a1)
; LMULMAX1-RV32-NEXT: vsm.v v8, (a1)
; LMULMAX1-RV32-NEXT: addi a0, a0, 2
; LMULMAX1-RV32-NEXT: vsm.v v25, (a0)
; LMULMAX1-RV32-NEXT: vsm.v v8, (a0)
; LMULMAX1-RV32-NEXT: ret
;
; LMULMAX1-RV64-LABEL: splat_ones_v64i1:
; LMULMAX1-RV64: # %bb.0:
; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-RV64-NEXT: vmset.m v25
; LMULMAX1-RV64-NEXT: vsm.v v25, (a0)
; LMULMAX1-RV64-NEXT: vmset.m v8
; LMULMAX1-RV64-NEXT: vsm.v v8, (a0)
; LMULMAX1-RV64-NEXT: addi a1, a0, 6
; LMULMAX1-RV64-NEXT: vsm.v v25, (a1)
; LMULMAX1-RV64-NEXT: vsm.v v8, (a1)
; LMULMAX1-RV64-NEXT: addi a1, a0, 4
; LMULMAX1-RV64-NEXT: vsm.v v25, (a1)
; LMULMAX1-RV64-NEXT: vsm.v v8, (a1)
; LMULMAX1-RV64-NEXT: addi a0, a0, 2
; LMULMAX1-RV64-NEXT: vsm.v v25, (a0)
; LMULMAX1-RV64-NEXT: vsm.v v8, (a0)
; LMULMAX1-RV64-NEXT: ret
store <64 x i1> <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>, <64 x i1>* %x
ret void
@@ -298,41 +298,41 @@ define void @splat_v64i1(<64 x i1>* %x, i1 %y) {
; LMULMAX2-NEXT: andi a1, a1, 1
; LMULMAX2-NEXT: addi a2, zero, 32
; LMULMAX2-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; LMULMAX2-NEXT: vmv.v.x v26, a1
; LMULMAX2-NEXT: vmsne.vi v25, v26, 0
; LMULMAX2-NEXT: vmv.v.x v8, a1
; LMULMAX2-NEXT: vmsne.vi v10, v8, 0
; LMULMAX2-NEXT: addi a1, a0, 4
; LMULMAX2-NEXT: vsm.v v25, (a1)
; LMULMAX2-NEXT: vsm.v v25, (a0)
; LMULMAX2-NEXT: vsm.v v10, (a1)
; LMULMAX2-NEXT: vsm.v v10, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-RV32-LABEL: splat_v64i1:
; LMULMAX1-RV32: # %bb.0:
; LMULMAX1-RV32-NEXT: andi a1, a1, 1
; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-RV32-NEXT: vmv.v.x v25, a1
; LMULMAX1-RV32-NEXT: vmsne.vi v25, v25, 0
; LMULMAX1-RV32-NEXT: vmv.v.x v8, a1
; LMULMAX1-RV32-NEXT: vmsne.vi v8, v8, 0
; LMULMAX1-RV32-NEXT: addi a1, a0, 6
; LMULMAX1-RV32-NEXT: vsm.v v25, (a1)
; LMULMAX1-RV32-NEXT: vsm.v v8, (a1)
; LMULMAX1-RV32-NEXT: addi a1, a0, 4
; LMULMAX1-RV32-NEXT: vsm.v v25, (a1)
; LMULMAX1-RV32-NEXT: vsm.v v8, (a1)
; LMULMAX1-RV32-NEXT: addi a1, a0, 2
; LMULMAX1-RV32-NEXT: vsm.v v25, (a1)
; LMULMAX1-RV32-NEXT: vsm.v v25, (a0)
; LMULMAX1-RV32-NEXT: vsm.v v8, (a1)
; LMULMAX1-RV32-NEXT: vsm.v v8, (a0)
; LMULMAX1-RV32-NEXT: ret
;
; LMULMAX1-RV64-LABEL: splat_v64i1:
; LMULMAX1-RV64: # %bb.0:
; LMULMAX1-RV64-NEXT: andi a1, a1, 1
; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-RV64-NEXT: vmv.v.x v25, a1
; LMULMAX1-RV64-NEXT: vmsne.vi v25, v25, 0
; LMULMAX1-RV64-NEXT: vmv.v.x v8, a1
; LMULMAX1-RV64-NEXT: vmsne.vi v8, v8, 0
; LMULMAX1-RV64-NEXT: addi a1, a0, 6
; LMULMAX1-RV64-NEXT: vsm.v v25, (a1)
; LMULMAX1-RV64-NEXT: vsm.v v8, (a1)
; LMULMAX1-RV64-NEXT: addi a1, a0, 4
; LMULMAX1-RV64-NEXT: vsm.v v25, (a1)
; LMULMAX1-RV64-NEXT: vsm.v v8, (a1)
; LMULMAX1-RV64-NEXT: addi a1, a0, 2
; LMULMAX1-RV64-NEXT: vsm.v v25, (a1)
; LMULMAX1-RV64-NEXT: vsm.v v25, (a0)
; LMULMAX1-RV64-NEXT: vsm.v v8, (a1)
; LMULMAX1-RV64-NEXT: vsm.v v8, (a0)
; LMULMAX1-RV64-NEXT: ret
%a = insertelement <64 x i1> undef, i1 %y, i32 0
%b = shufflevector <64 x i1> %a, <64 x i1> undef, <64 x i32> zeroinitializer