106 changes: 53 additions & 53 deletions llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll


30 changes: 15 additions & 15 deletions llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
@@ -5,12 +5,12 @@
define i1 @extractelt_nxv1i1(<vscale x 1 x i8>* %x, i64 %idx) nounwind {
; CHECK-LABEL: extractelt_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -23,12 +23,12 @@ define i1 @extractelt_nxv1i1(<vscale x 1 x i8>* %x, i64 %idx) nounwind {
define i1 @extractelt_nxv2i1(<vscale x 2 x i8>* %x, i64 %idx) nounwind {
; CHECK-LABEL: extractelt_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, mu
; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -41,12 +41,12 @@ define i1 @extractelt_nxv2i1(<vscale x 2 x i8>* %x, i64 %idx) nounwind {
define i1 @extractelt_nxv4i1(<vscale x 4 x i8>* %x, i64 %idx) nounwind {
; CHECK-LABEL: extractelt_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, mu
; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -60,11 +60,11 @@ define i1 @extractelt_nxv8i1(<vscale x 8 x i8>* %x, i64 %idx) nounwind {
; CHECK-LABEL: extractelt_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vl1r.v v8, (a0)
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -78,11 +78,11 @@ define i1 @extractelt_nxv16i1(<vscale x 16 x i8>* %x, i64 %idx) nounwind {
; CHECK-LABEL: extractelt_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vl2r.v v8, (a0)
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -96,11 +96,11 @@ define i1 @extractelt_nxv32i1(<vscale x 32 x i8>* %x, i64 %idx) nounwind {
; CHECK-LABEL: extractelt_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vl4r.v v8, (a0)
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, mu
; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -114,11 +114,11 @@ define i1 @extractelt_nxv64i1(<vscale x 64 x i8>* %x, i64 %idx) nounwind {
; CHECK-LABEL: extractelt_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vl8r.v v8, (a0)
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -150,7 +150,7 @@ define i1 @extractelt_nxv128i1(<vscale x 128 x i8>* %x, i64 %idx) nounwind {
; CHECK-NEXT: vl8r.v v16, (a4)
; CHECK-NEXT: vl8r.v v24, (a0)
; CHECK-NEXT: add a0, a3, a1
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vi v8, v16, 0
; CHECK-NEXT: vmseq.vi v0, v24, 0
; CHECK-NEXT: vmv.v.i v16, 0
156 changes: 78 additions & 78 deletions llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll


152 changes: 76 additions & 76 deletions llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll


12 changes: 6 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vector-fpext-vp.ll
@@ -18,7 +18,7 @@ define <2 x float> @vfpext_v2f16_v2f32(<2 x half> %a, <2 x i1> %m, i32 zeroext %
define <2 x float> @vfpext_v2f16_v2f32_unmasked(<2 x half> %a, i32 zeroext %vl) {
; CHECK-LABEL: vfpext_v2f16_v2f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
@@ -43,9 +43,9 @@ define <2 x double> @vfpext_v2f16_v2f64(<2 x half> %a, <2 x i1> %m, i32 zeroext
define <2 x double> @vfpext_v2f16_v2f64_unmasked(<2 x half> %a, i32 zeroext %vl) {
; CHECK-LABEL: vfpext_v2f16_v2f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v9, v8
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v8, v9
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.fpext.v2f64.v2f16(<2 x half> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
@@ -68,7 +68,7 @@ define <2 x double> @vfpext_v2f32_v2f64(<2 x float> %a, <2 x i1> %m, i32 zeroext
define <2 x double> @vfpext_v2f32_v2f64_unmasked(<2 x float> %a, i32 zeroext %vl) {
; CHECK-LABEL: vfpext_v2f32_v2f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
@@ -96,14 +96,14 @@ define <32 x double> @vfpext_v32f32_v32f64(<32 x float> %a, <32 x i1> %m, i32 ze
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v1, v0
; CHECK-NEXT: li a1, 0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: addi a2, a0, -16
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: bltu a0, a2, .LBB7_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB7_2:
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v8, 16
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: li a1, 16
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll
@@ -18,7 +18,7 @@ define <2 x half> @vfptrunc_v2f16_v2f32(<2 x float> %a, <2 x i1> %m, i32 zeroext
define <2 x half> @vfptrunc_v2f16_v2f32_unmasked(<2 x float> %a, i32 zeroext %vl) {
; CHECK-LABEL: vfptrunc_v2f16_v2f32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
@@ -43,9 +43,9 @@ define <2 x half> @vfptrunc_v2f16_v2f64(<2 x double> %a, <2 x i1> %m, i32 zeroex
define <2 x half> @vfptrunc_v2f16_v2f64_unmasked(<2 x double> %a, i32 zeroext %vl) {
; CHECK-LABEL: vfptrunc_v2f16_v2f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfncvt.rod.f.f.w v9, v8
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v8, v9
; CHECK-NEXT: ret
%v = call <2 x half> @llvm.vp.fptrunc.v2f16.v2f64(<2 x double> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
@@ -68,7 +68,7 @@ define <2 x float> @vfptrunc_v2f32_v2f64(<2 x double> %a, <2 x i1> %m, i32 zeroe
define <2 x float> @vfptrunc_v2f32_v2f64_unmasked(<2 x double> %a, i32 zeroext %vl) {
; CHECK-LABEL: vfptrunc_v2f32_v2f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v9, v8
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
@@ -103,7 +103,7 @@ define <32 x float> @vfptrunc_v32f32_v32f64(<32 x double> %a, <32 x i1> %m, i32
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: li a1, 0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: addi a2, a0, -16
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: bltu a0, a2, .LBB7_2
@@ -123,7 +123,7 @@ define <32 x float> @vfptrunc_v32f32_v32f64(<32 x double> %a, <32 x i1> %m, i32
; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfncvt.f.f.w v16, v24, v0.t
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT: vslideup.vi v16, v8, 16
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: csrr a0, vlenb
14 changes: 7 additions & 7 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vector-segN-load.ll
@@ -5,7 +5,7 @@
define <8 x i8> @load_factor2(<16 x i8>* %ptr) {
; CHECK-LABEL: load_factor2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg2e8.v v7, (a0)
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v7_v8
; CHECK-NEXT: ret
@@ -19,7 +19,7 @@ define <8 x i8> @load_factor2(<16 x i8>* %ptr) {
define <8 x i8> @load_factor3(<24 x i8>* %ptr) {
; CHECK-LABEL: load_factor3:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg3e8.v v6, (a0)
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v6_v7_v8
; CHECK-NEXT: ret
@@ -34,7 +34,7 @@ define <8 x i8> @load_factor3(<24 x i8>* %ptr) {
define <8 x i8> @load_factor4(<32 x i8>* %ptr) {
; CHECK-LABEL: load_factor4:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg4e8.v v5, (a0)
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v5_v6_v7_v8
; CHECK-NEXT: ret
@@ -50,7 +50,7 @@ define <8 x i8> @load_factor4(<32 x i8>* %ptr) {
define <8 x i8> @load_factor5(<40 x i8>* %ptr) {
; CHECK-LABEL: load_factor5:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg5e8.v v4, (a0)
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v4_v5_v6_v7_v8
; CHECK-NEXT: ret
@@ -67,7 +67,7 @@ define <8 x i8> @load_factor5(<40 x i8>* %ptr) {
define <8 x i8> @load_factor6(<48 x i8>* %ptr) {
; CHECK-LABEL: load_factor6:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg6e8.v v3, (a0)
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v3_v4_v5_v6_v7_v8
; CHECK-NEXT: ret
@@ -85,7 +85,7 @@ define <8 x i8> @load_factor6(<48 x i8>* %ptr) {
define <8 x i8> @load_factor7(<56 x i8>* %ptr) {
; CHECK-LABEL: load_factor7:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg7e8.v v2, (a0)
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v2_v3_v4_v5_v6_v7_v8
; CHECK-NEXT: ret
@@ -104,7 +104,7 @@ define <8 x i8> @load_factor7(<56 x i8>* %ptr) {
define <8 x i8> @load_factor8(<64 x i8>* %ptr) {
; CHECK-LABEL: load_factor8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlseg8e8.v v1, (a0)
; CHECK-NEXT: # kill: def $v8 killed $v8 killed $v1_v2_v3_v4_v5_v6_v7_v8
; CHECK-NEXT: ret
292 changes: 146 additions & 146 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-reverse.ll


40 changes: 20 additions & 20 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store-asm.ll
@@ -18,7 +18,7 @@ define void @gather(i8* noalias nocapture %A, i8* noalias nocapture readonly %B)
; CHECK-NEXT: li a5, 1024
; CHECK-NEXT: .LBB0_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-NEXT: vlse8.v v8, (a1), a4
; CHECK-NEXT: add a6, a0, a2
; CHECK-NEXT: vle8.v v9, (a6)
@@ -60,7 +60,7 @@ define void @gather_masked(i8* noalias nocapture %A, i8* noalias nocapture reado
; V-NEXT: li a2, 0
; V-NEXT: lui a3, 983765
; V-NEXT: addiw a3, a3, 873
; V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu
; V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; V-NEXT: vmv.s.x v0, a3
; V-NEXT: li a3, 32
; V-NEXT: li a4, 5
@@ -85,7 +85,7 @@ define void @gather_masked(i8* noalias nocapture %A, i8* noalias nocapture reado
; ZVE32F-NEXT: li a2, 0
; ZVE32F-NEXT: lui a3, 983765
; ZVE32F-NEXT: addiw a3, a3, 873
; ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; ZVE32F-NEXT: vmv.s.x v0, a3
; ZVE32F-NEXT: li a3, 32
; ZVE32F-NEXT: li a4, 5
@@ -139,7 +139,7 @@ define void @gather_negative_stride(i8* noalias nocapture %A, i8* noalias nocapt
; CHECK-NEXT: li a5, 1024
; CHECK-NEXT: .LBB2_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-NEXT: vlse8.v v8, (a1), a4
; CHECK-NEXT: add a6, a0, a2
; CHECK-NEXT: vle8.v v9, (a6)
@@ -183,7 +183,7 @@ define void @gather_zero_stride(i8* noalias nocapture %A, i8* noalias nocapture
; CHECK-NEXT: li a4, 1024
; CHECK-NEXT: .LBB3_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-NEXT: vlse8.v v8, (a1), zero
; CHECK-NEXT: add a5, a0, a2
; CHECK-NEXT: vle8.v v9, (a5)
@@ -233,7 +233,7 @@ define void @scatter(i8* noalias nocapture %A, i8* noalias nocapture readonly %B
; CHECK-NEXT: .LBB4_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: add a6, a1, a2
; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-NEXT: vle8.v v8, (a6)
; CHECK-NEXT: vlse8.v v9, (a0), a4
; CHECK-NEXT: vadd.vv v8, v9, v8
@@ -274,7 +274,7 @@ define void @scatter_masked(i8* noalias nocapture %A, i8* noalias nocapture read
; V-NEXT: li a3, 32
; V-NEXT: lui a4, 983765
; V-NEXT: addiw a4, a4, 873
; V-NEXT: vsetivli zero, 1, e32, mf2, ta, mu
; V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; V-NEXT: vmv.s.x v0, a4
; V-NEXT: li a4, 5
; V-NEXT: li a5, 1024
@@ -299,7 +299,7 @@ define void @scatter_masked(i8* noalias nocapture %A, i8* noalias nocapture read
; ZVE32F-NEXT: li a3, 32
; ZVE32F-NEXT: lui a4, 983765
; ZVE32F-NEXT: addiw a4, a4, 873
; ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; ZVE32F-NEXT: vmv.s.x v0, a4
; ZVE32F-NEXT: li a4, 5
; ZVE32F-NEXT: li a5, 1024
@@ -353,13 +353,13 @@ define void @gather_pow2(i32* noalias nocapture %A, i32* noalias nocapture reado
; CHECK-NEXT: li a4, 32
; CHECK-NEXT: .LBB6_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, ma
; CHECK-NEXT: vlse32.v v8, (a1), a3
; CHECK-NEXT: vsetvli zero, a4, e8, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-NEXT: vle8.v v9, (a0)
; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, ma
; CHECK-NEXT: vadd.vv v8, v9, v8
; CHECK-NEXT: vsetvli zero, a4, e8, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: addi a2, a2, -8
; CHECK-NEXT: addi a0, a0, 32
@@ -404,9 +404,9 @@ define void @scatter_pow2(i32* noalias nocapture %A, i32* noalias nocapture read
; CHECK-NEXT: li a4, 16
; CHECK-NEXT: .LBB7_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a3, e8, m1, ta, ma
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, ma
; CHECK-NEXT: vlse32.v v9, (a0), a4
; CHECK-NEXT: vadd.vv v8, v9, v8
; CHECK-NEXT: vsse32.v v8, (a0), a4
@@ -454,7 +454,7 @@ define void @struct_gather(i32* noalias nocapture %A, %struct.foo* noalias nocap
; CHECK-NEXT: addi a1, a1, 132
; CHECK-NEXT: li a2, 1024
; CHECK-NEXT: li a3, 16
; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, ma
; CHECK-NEXT: .LBB8_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: addi a4, a1, -128
@@ -520,7 +520,7 @@ define void @gather_unroll(i32* noalias nocapture %A, i32* noalias nocapture rea
; CHECK-NEXT: li a2, 256
; CHECK-NEXT: li a3, 64
; CHECK-NEXT: li a4, 16
; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, ma
; CHECK-NEXT: .LBB9_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vlse32.v v8, (a1), a3
@@ -609,7 +609,7 @@ define void @gather_of_pointers(i32** noalias nocapture %0, i32** noalias nocapt
; V: # %bb.0:
; V-NEXT: li a2, 1024
; V-NEXT: li a3, 40
; V-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; V-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1
; V-NEXT: vlse64.v v8, (a1), a3
; V-NEXT: addi a4, a1, 80
@@ -686,7 +686,7 @@ define void @scatter_of_pointers(i32** noalias nocapture %0, i32** noalias nocap
; V: # %bb.0:
; V-NEXT: li a2, 1024
; V-NEXT: li a3, 40
; V-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; V-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1
; V-NEXT: vle64.v v8, (a1)
; V-NEXT: addi a4, a1, 16
@@ -782,7 +782,7 @@ define void @strided_load_startval_add_with_splat(i8* noalias nocapture %0, i8*
; CHECK-NEXT: li t0, 5
; CHECK-NEXT: mv t1, a5
; CHECK-NEXT: .LBB12_3: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vsetvli zero, a7, e8, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a7, e8, m1, ta, ma
; CHECK-NEXT: vlse8.v v8, (a6), t0
; CHECK-NEXT: vle8.v v9, (a2)
; CHECK-NEXT: vadd.vv v8, v9, v8
@@ -883,7 +883,7 @@ define void @gather_no_scalar_remainder(i8* noalias nocapture noundef %arg, i8*
; CHECK-NEXT: beqz a2, .LBB13_3
; CHECK-NEXT: # %bb.1: # %bb2
; CHECK-NEXT: li a3, 5
; CHECK-NEXT: vsetivli zero, 16, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 16, e8, mf2, ta, ma
; CHECK-NEXT: .LBB13_2: # %bb4
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vlse8.v v8, (a1), a3
6 changes: 3 additions & 3 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp-mask.ll
@@ -19,7 +19,7 @@ define <2 x i1> @vtrunc_nxv2i1_nxv2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %v
define <2 x i1> @vtrunc_nxv2i1_nxv2i16_unmasked(<2 x i16> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i1_nxv2i16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
@@ -44,7 +44,7 @@ define <2 x i1> @vtrunc_nxv2i1_nxv2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %v
define <2 x i1> @vtrunc_nxv2i1_nxv2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i1_nxv2i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
@@ -69,7 +69,7 @@ define <2 x i1> @vtrunc_nxv2i1_nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %v
define <2 x i1> @vtrunc_nxv2i1_nxv2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i1_nxv2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
52 changes: 26 additions & 26 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll
@@ -41,7 +41,7 @@ define <2 x i8> @vtrunc_nxv2i8_nxv2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %v
define <2 x i8> @vtrunc_nxv2i8_nxv2i16_unmasked(<2 x i16> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i16(<2 x i16> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
@@ -62,7 +62,7 @@ define <128 x i7> @vtrunc_nxv128i7_nxv128i16(<128 x i16> %a, <128 x i1> %m, i32
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: li a1, 0
; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; CHECK-NEXT: addi a2, a0, -64
; CHECK-NEXT: vslidedown.vi v0, v0, 8
; CHECK-NEXT: bltu a0, a2, .LBB4_2
@@ -82,7 +82,7 @@ define <128 x i7> @vtrunc_nxv128i7_nxv128i16(<128 x i16> %a, <128 x i1> %m, i32
; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t
; CHECK-NEXT: li a0, 128
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
; CHECK-NEXT: vslideup.vx v16, v8, a1
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: csrr a0, vlenb
@@ -111,9 +111,9 @@ define <2 x i8> @vtrunc_nxv2i8_nxv2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %v
define <2 x i8> @vtrunc_nxv2i8_nxv2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i32(<2 x i32> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
@@ -139,11 +139,11 @@ define <2 x i8> @vtrunc_nxv2i8_nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %v
define <2 x i8> @vtrunc_nxv2i8_nxv2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i8_nxv2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i64(<2 x i64> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
@@ -165,7 +165,7 @@ define <2 x i16> @vtrunc_nxv2i16_nxv2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext
define <2 x i16> @vtrunc_nxv2i16_nxv2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i32(<2 x i32> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
@@ -189,9 +189,9 @@ define <2 x i16> @vtrunc_nxv2i16_nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext
define <2 x i16> @vtrunc_nxv2i16_nxv2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i16_nxv2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i64(<2 x i64> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
@@ -227,7 +227,7 @@ define <2 x i32> @vtrunc_nxv2i32_nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext
define <2 x i32> @vtrunc_nxv2i32_nxv2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i32_nxv2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<2 x i64> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
@@ -258,15 +258,15 @@ define <128 x i32> @vtrunc_nxv128i32_nxv128i64(<128 x i64> %a, <128 x i1> %m, i3
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: li a2, 0
; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; CHECK-NEXT: addi a3, a7, -64
; CHECK-NEXT: vslidedown.vi v2, v0, 8
; CHECK-NEXT: mv a4, a2
; CHECK-NEXT: bltu a7, a3, .LBB16_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a3
; CHECK-NEXT: .LBB16_2:
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v3, v2, 4
; CHECK-NEXT: addi a6, a4, -32
; CHECK-NEXT: addi a3, a1, 640
@@ -275,9 +275,9 @@ define <128 x i32> @vtrunc_nxv128i32_nxv128i64(<128 x i64> %a, <128 x i1> %m, i3
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a5, a6
; CHECK-NEXT: .LBB16_4:
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v3, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a3)
; CHECK-NEXT: addi t0, a5, -16
; CHECK-NEXT: addi a6, a1, 512
@@ -313,7 +313,7 @@ define <128 x i32> @vtrunc_nxv128i32_nxv128i64(<128 x i64> %a, <128 x i1> %m, i3
; CHECK-NEXT: # %bb.9:
; CHECK-NEXT: li a7, 64
; CHECK-NEXT: .LBB16_10:
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v3, v1, 4
; CHECK-NEXT: addi t0, a7, -32
; CHECK-NEXT: addi a5, a1, 128
@@ -322,9 +322,9 @@ define <128 x i32> @vtrunc_nxv128i32_nxv128i64(<128 x i64> %a, <128 x i1> %m, i3
; CHECK-NEXT: # %bb.11:
; CHECK-NEXT: mv a6, t0
; CHECK-NEXT: .LBB16_12:
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v3, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a5)
; CHECK-NEXT: addi a5, a6, -16
; CHECK-NEXT: mv t0, a2
@@ -359,9 +359,9 @@ define <128 x i32> @vtrunc_nxv128i32_nxv128i64(<128 x i64> %a, <128 x i1> %m, i3
; CHECK-NEXT: # %bb.17:
; CHECK-NEXT: li a4, 32
; CHECK-NEXT: .LBB16_18:
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v2, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (t0)
; CHECK-NEXT: addi t0, a4, -16
; CHECK-NEXT: addi a6, a1, 256
@@ -386,14 +386,14 @@ define <128 x i32> @vtrunc_nxv128i32_nxv128i64(<128 x i64> %a, <128 x i1> %m, i3
; CHECK-NEXT: # %bb.23:
; CHECK-NEXT: li a7, 32
; CHECK-NEXT: .LBB16_24:
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: addi a1, a7, -16
; CHECK-NEXT: vslidedown.vi v0, v1, 2
; CHECK-NEXT: bltu a7, a1, .LBB16_26
; CHECK-NEXT: # %bb.25:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB16_26:
; CHECK-NEXT: vsetvli zero, a5, e32, m8, tu, mu
; CHECK-NEXT: vsetvli zero, a5, e32, m8, tu, ma
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a4, 48
; CHECK-NEXT: mul a1, a1, a4
@@ -458,7 +458,7 @@ define <128 x i32> @vtrunc_nxv128i32_nxv128i64(<128 x i64> %a, <128 x i1> %m, i3
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8re8.v v8, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, a5, e32, m8, tu, mu
; CHECK-NEXT: vsetvli zero, a5, e32, m8, tu, ma
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: add a1, sp, a1
@@ -508,7 +508,7 @@ define <32 x i32> @vtrunc_nxv32i32_nxv32i64(<32 x i64> %a, <32 x i1> %m, i32 zer
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: li a1, 0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: addi a2, a0, -16
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: bltu a0, a2, .LBB17_2
@@ -528,7 +528,7 @@ define <32 x i32> @vtrunc_nxv32i32_nxv32i64(<32 x i64> %a, <32 x i1> %m, i32 zer
; CHECK-NEXT: vl8re8.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT: vslideup.vi v16, v8, 16
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: csrr a0, vlenb
32 changes: 16 additions & 16 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs.ll
@@ -7,7 +7,7 @@
define void @abs_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: abs_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
@@ -23,7 +23,7 @@ declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1)
define void @abs_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: abs_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
@@ -39,7 +39,7 @@ declare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1)
define void @abs_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: abs_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
@@ -55,7 +55,7 @@ declare <4 x i32> @llvm.abs.v4i32(<4 x i32>, i1)
define void @abs_v2i64(<2 x i64>* %x) {
; CHECK-LABEL: abs_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vrsub.vi v9, v8, 0
; CHECK-NEXT: vmax.vv v8, v8, v9
@@ -72,7 +72,7 @@ define void @abs_v32i8(<32 x i8>* %x) {
; LMULMAX2-LABEL: abs_v32i8:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: li a1, 32
; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; LMULMAX2-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; LMULMAX2-NEXT: vle8.v v8, (a0)
; LMULMAX2-NEXT: vrsub.vi v10, v8, 0
; LMULMAX2-NEXT: vmax.vv v8, v8, v10
@@ -81,7 +81,7 @@ define void @abs_v32i8(<32 x i8>* %x) {
;
; LMULMAX1-RV32-LABEL: abs_v32i8:
; LMULMAX1-RV32: # %bb.0:
; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; LMULMAX1-RV32-NEXT: addi a1, a0, 16
; LMULMAX1-RV32-NEXT: vle8.v v8, (a1)
; LMULMAX1-RV32-NEXT: vle8.v v9, (a0)
@@ -95,7 +95,7 @@ define void @abs_v32i8(<32 x i8>* %x) {
;
; LMULMAX1-RV64-LABEL: abs_v32i8:
; LMULMAX1-RV64: # %bb.0:
; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; LMULMAX1-RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; LMULMAX1-RV64-NEXT: addi a1, a0, 16
; LMULMAX1-RV64-NEXT: vle8.v v8, (a1)
; LMULMAX1-RV64-NEXT: vle8.v v9, (a0)
@@ -116,7 +116,7 @@ declare <32 x i8> @llvm.abs.v32i8(<32 x i8>, i1)
define void @abs_v16i16(<16 x i16>* %x) {
; LMULMAX2-LABEL: abs_v16i16:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; LMULMAX2-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; LMULMAX2-NEXT: vle16.v v8, (a0)
; LMULMAX2-NEXT: vrsub.vi v10, v8, 0
; LMULMAX2-NEXT: vmax.vv v8, v8, v10
@@ -125,7 +125,7 @@ define void @abs_v16i16(<16 x i16>* %x) {
;
; LMULMAX1-RV32-LABEL: abs_v16i16:
; LMULMAX1-RV32: # %bb.0:
; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; LMULMAX1-RV32-NEXT: addi a1, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v8, (a1)
; LMULMAX1-RV32-NEXT: vle16.v v9, (a0)
@@ -139,7 +139,7 @@ define void @abs_v16i16(<16 x i16>* %x) {
;
; LMULMAX1-RV64-LABEL: abs_v16i16:
; LMULMAX1-RV64: # %bb.0:
; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; LMULMAX1-RV64-NEXT: addi a1, a0, 16
; LMULMAX1-RV64-NEXT: vle16.v v8, (a1)
; LMULMAX1-RV64-NEXT: vle16.v v9, (a0)
@@ -160,7 +160,7 @@ declare <16 x i16> @llvm.abs.v16i16(<16 x i16>, i1)
define void @abs_v8i32(<8 x i32>* %x) {
; LMULMAX2-LABEL: abs_v8i32:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX2-NEXT: vle32.v v8, (a0)
; LMULMAX2-NEXT: vrsub.vi v10, v8, 0
; LMULMAX2-NEXT: vmax.vv v8, v8, v10
@@ -169,7 +169,7 @@ define void @abs_v8i32(<8 x i32>* %x) {
;
; LMULMAX1-RV32-LABEL: abs_v8i32:
; LMULMAX1-RV32: # %bb.0:
; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-RV32-NEXT: addi a1, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v8, (a1)
; LMULMAX1-RV32-NEXT: vle32.v v9, (a0)
@@ -183,7 +183,7 @@ define void @abs_v8i32(<8 x i32>* %x) {
;
; LMULMAX1-RV64-LABEL: abs_v8i32:
; LMULMAX1-RV64: # %bb.0:
; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-RV64-NEXT: addi a1, a0, 16
; LMULMAX1-RV64-NEXT: vle32.v v8, (a1)
; LMULMAX1-RV64-NEXT: vle32.v v9, (a0)
@@ -204,7 +204,7 @@ declare <8 x i32> @llvm.abs.v8i32(<8 x i32>, i1)
define void @abs_v4i64(<4 x i64>* %x) {
; LMULMAX2-LABEL: abs_v4i64:
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; LMULMAX2-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; LMULMAX2-NEXT: vle64.v v8, (a0)
; LMULMAX2-NEXT: vrsub.vi v10, v8, 0
; LMULMAX2-NEXT: vmax.vv v8, v8, v10
@@ -213,7 +213,7 @@ define void @abs_v4i64(<4 x i64>* %x) {
;
; LMULMAX1-RV32-LABEL: abs_v4i64:
; LMULMAX1-RV32: # %bb.0:
; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-RV32-NEXT: addi a1, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v8, (a1)
; LMULMAX1-RV32-NEXT: vle64.v v9, (a0)
@@ -227,7 +227,7 @@ define void @abs_v4i64(<4 x i64>* %x) {
;
; LMULMAX1-RV64-LABEL: abs_v4i64:
; LMULMAX1-RV64: # %bb.0:
; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-RV64-NEXT: addi a1, a0, 16
; LMULMAX1-RV64-NEXT: vle64.v v8, (a1)
; LMULMAX1-RV64-NEXT: vle64.v v9, (a0)
@@ -8,7 +8,7 @@ define <512 x i8> @bitcast_1024B(<256 x i16> %a, <512 x i8> %b) {
; VLEN256: # %bb.0:
; VLEN256-NEXT: addi a1, a0, 256
; VLEN256-NEXT: li a2, 256
; VLEN256-NEXT: vsetvli zero, a2, e8, m8, ta, mu
; VLEN256-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; VLEN256-NEXT: vle8.v v24, (a0)
; VLEN256-NEXT: vle8.v v0, (a1)
; VLEN256-NEXT: vadd.vv v8, v24, v8
@@ -18,14 +18,14 @@ define <512 x i8> @bitcast_1024B(<256 x i16> %a, <512 x i8> %b) {
; VLEN512-LABEL: bitcast_1024B:
; VLEN512: # %bb.0:
; VLEN512-NEXT: li a0, 512
; VLEN512-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; VLEN512-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; VLEN512-NEXT: vadd.vv v8, v16, v8
; VLEN512-NEXT: ret
;
; VLEN1024-LABEL: bitcast_1024B:
; VLEN1024: # %bb.0:
; VLEN1024-NEXT: li a0, 512
; VLEN1024-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; VLEN1024-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; VLEN1024-NEXT: vadd.vv v8, v12, v8
; VLEN1024-NEXT: ret
%c = bitcast <256 x i16> %a to <512 x i8>
140 changes: 70 additions & 70 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll


120 changes: 60 additions & 60 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse.ll


82 changes: 41 additions & 41 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap.ll
@@ -7,7 +7,7 @@
define void @bswap_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-LABEL: bswap_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vsrl.vi v9, v8, 8
; CHECK-NEXT: vsll.vi v8, v8, 8
@@ -25,7 +25,7 @@ declare <8 x i16> @llvm.bswap.v8i16(<8 x i16>)
define void @bswap_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; RV32-LABEL: bswap_v4i32:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: vsrl.vi v9, v8, 8
; RV32-NEXT: lui a1, 16
@@ -44,7 +44,7 @@ define void @bswap_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
;
; RV64-LABEL: bswap_v4i32:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vle32.v v8, (a0)
; RV64-NEXT: vsrl.vi v9, v8, 8
; RV64-NEXT: lui a1, 16
@@ -71,7 +71,7 @@ declare <4 x i32> @llvm.bswap.v4i32(<4 x i32>)
define void @bswap_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; RV32-LABEL: bswap_v2i64:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: li a1, 56
; RV32-NEXT: vsrl.vx v9, v8, a1
@@ -86,34 +86,34 @@ define void @bswap_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; RV32-NEXT: vand.vx v10, v10, a4
; RV32-NEXT: li a5, 5
; RV32-NEXT: vmv.s.x v0, a5
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vmv.v.i v11, 0
; RV32-NEXT: lui a5, 1044480
; RV32-NEXT: vmerge.vxm v11, v11, a5, v0
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vsrl.vi v12, v8, 8
; RV32-NEXT: vand.vv v11, v12, v11
; RV32-NEXT: vor.vv v10, v11, v10
; RV32-NEXT: vor.vv v9, v10, v9
; RV32-NEXT: li a5, 255
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vmv.v.x v10, a5
; RV32-NEXT: vmerge.vim v10, v10, 0, v0
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vsll.vi v11, v8, 8
; RV32-NEXT: vand.vv v10, v11, v10
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vmv.v.x v11, a3
; RV32-NEXT: vmerge.vim v11, v11, 0, v0
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vsll.vi v12, v8, 24
; RV32-NEXT: vand.vv v11, v12, v11
; RV32-NEXT: vor.vv v10, v11, v10
; RV32-NEXT: vsll.vx v11, v8, a2
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vmv.v.x v12, a4
; RV32-NEXT: vmerge.vim v12, v12, 0, v0
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vand.vv v11, v11, v12
; RV32-NEXT: vsll.vx v8, v8, a1
; RV32-NEXT: vor.vv v8, v8, v11
@@ -124,7 +124,7 @@ define void @bswap_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
;
; RV64-LABEL: bswap_v2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: li a1, 56
; RV64-NEXT: vsrl.vx v9, v8, a1
@@ -170,7 +170,7 @@ declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>)
define void @bswap_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
; LMULMAX2-RV32-LABEL: bswap_v16i16:
; LMULMAX2-RV32: # %bb.0:
; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; LMULMAX2-RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; LMULMAX2-RV32-NEXT: vle16.v v8, (a0)
; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 8
; LMULMAX2-RV32-NEXT: vsll.vi v8, v8, 8
@@ -180,7 +180,7 @@ define void @bswap_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
;
; LMULMAX2-RV64-LABEL: bswap_v16i16:
; LMULMAX2-RV64: # %bb.0:
; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, mu
; LMULMAX2-RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; LMULMAX2-RV64-NEXT: vle16.v v8, (a0)
; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 8
; LMULMAX2-RV64-NEXT: vsll.vi v8, v8, 8
@@ -190,7 +190,7 @@ define void @bswap_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
;
; LMULMAX1-RV32-LABEL: bswap_v16i16:
; LMULMAX1-RV32: # %bb.0:
; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; LMULMAX1-RV32-NEXT: addi a1, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v8, (a1)
; LMULMAX1-RV32-NEXT: vle16.v v9, (a0)
@@ -206,7 +206,7 @@ define void @bswap_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
;
; LMULMAX1-RV64-LABEL: bswap_v16i16:
; LMULMAX1-RV64: # %bb.0:
; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, mu
; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; LMULMAX1-RV64-NEXT: addi a1, a0, 16
; LMULMAX1-RV64-NEXT: vle16.v v8, (a1)
; LMULMAX1-RV64-NEXT: vle16.v v9, (a0)
@@ -230,7 +230,7 @@ declare <16 x i16> @llvm.bswap.v16i16(<16 x i16>)
define void @bswap_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
; LMULMAX2-RV32-LABEL: bswap_v8i32:
; LMULMAX2-RV32: # %bb.0:
; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX2-RV32-NEXT: vle32.v v8, (a0)
; LMULMAX2-RV32-NEXT: vsrl.vi v10, v8, 8
; LMULMAX2-RV32-NEXT: lui a1, 16
@@ -249,7 +249,7 @@ define void @bswap_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
;
; LMULMAX2-RV64-LABEL: bswap_v8i32:
; LMULMAX2-RV64: # %bb.0:
; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-RV64-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX2-RV64-NEXT: vle32.v v8, (a0)
; LMULMAX2-RV64-NEXT: vsrl.vi v10, v8, 8
; LMULMAX2-RV64-NEXT: lui a1, 16
@@ -268,7 +268,7 @@ define void @bswap_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
;
; LMULMAX1-RV32-LABEL: bswap_v8i32:
; LMULMAX1-RV32: # %bb.0:
; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-RV32-NEXT: addi a1, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v8, (a1)
; LMULMAX1-RV32-NEXT: vle32.v v9, (a0)
@@ -299,7 +299,7 @@ define void @bswap_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
;
; LMULMAX1-RV64-LABEL: bswap_v8i32:
; LMULMAX1-RV64: # %bb.0:
; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-RV64-NEXT: addi a1, a0, 16
; LMULMAX1-RV64-NEXT: vle32.v v8, (a1)
; LMULMAX1-RV64-NEXT: vle32.v v9, (a0)
@@ -338,7 +338,7 @@ declare <8 x i32> @llvm.bswap.v8i32(<8 x i32>)
define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX2-RV32-LABEL: bswap_v4i64:
; LMULMAX2-RV32: # %bb.0:
; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; LMULMAX2-RV32-NEXT: vle64.v v8, (a0)
; LMULMAX2-RV32-NEXT: li a1, 56
; LMULMAX2-RV32-NEXT: vsrl.vx v10, v8, a1
@@ -353,34 +353,34 @@ define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX2-RV32-NEXT: vand.vx v12, v12, a4
; LMULMAX2-RV32-NEXT: li a5, 85
; LMULMAX2-RV32-NEXT: vmv.s.x v0, a5
; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX2-RV32-NEXT: vmv.v.i v14, 0
; LMULMAX2-RV32-NEXT: lui a5, 1044480
; LMULMAX2-RV32-NEXT: vmerge.vxm v14, v14, a5, v0
; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; LMULMAX2-RV32-NEXT: vsrl.vi v16, v8, 8
; LMULMAX2-RV32-NEXT: vand.vv v14, v16, v14
; LMULMAX2-RV32-NEXT: vor.vv v12, v14, v12
; LMULMAX2-RV32-NEXT: vor.vv v10, v12, v10
; LMULMAX2-RV32-NEXT: li a5, 255
; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX2-RV32-NEXT: vmv.v.x v12, a5
; LMULMAX2-RV32-NEXT: vmerge.vim v12, v12, 0, v0
; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; LMULMAX2-RV32-NEXT: vsll.vi v14, v8, 8
; LMULMAX2-RV32-NEXT: vand.vv v12, v14, v12
; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX2-RV32-NEXT: vmv.v.x v14, a3
; LMULMAX2-RV32-NEXT: vmerge.vim v14, v14, 0, v0
; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; LMULMAX2-RV32-NEXT: vsll.vi v16, v8, 24
; LMULMAX2-RV32-NEXT: vand.vv v14, v16, v14
; LMULMAX2-RV32-NEXT: vor.vv v12, v14, v12
; LMULMAX2-RV32-NEXT: vsll.vx v14, v8, a2
; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; LMULMAX2-RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX2-RV32-NEXT: vmv.v.x v16, a4
; LMULMAX2-RV32-NEXT: vmerge.vim v16, v16, 0, v0
; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; LMULMAX2-RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; LMULMAX2-RV32-NEXT: vand.vv v14, v14, v16
; LMULMAX2-RV32-NEXT: vsll.vx v8, v8, a1
; LMULMAX2-RV32-NEXT: vor.vv v8, v8, v14
@@ -391,7 +391,7 @@ define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
;
; LMULMAX2-RV64-LABEL: bswap_v4i64:
; LMULMAX2-RV64: # %bb.0:
; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; LMULMAX2-RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; LMULMAX2-RV64-NEXT: vle64.v v8, (a0)
; LMULMAX2-RV64-NEXT: li a1, 56
; LMULMAX2-RV64-NEXT: vsrl.vx v10, v8, a1
@@ -429,7 +429,7 @@ define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
;
; LMULMAX1-RV32-LABEL: bswap_v4i64:
; LMULMAX1-RV32: # %bb.0:
; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-RV32-NEXT: addi a1, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v9, (a1)
; LMULMAX1-RV32-NEXT: vle64.v v8, (a0)
@@ -446,34 +446,34 @@ define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX1-RV32-NEXT: vand.vx v11, v11, a5
; LMULMAX1-RV32-NEXT: li a6, 5
; LMULMAX1-RV32-NEXT: vmv.s.x v0, a6
; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-RV32-NEXT: vmv.v.i v12, 0
; LMULMAX1-RV32-NEXT: lui a6, 1044480
; LMULMAX1-RV32-NEXT: vmerge.vxm v12, v12, a6, v0
; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-RV32-NEXT: vsrl.vi v13, v9, 8
; LMULMAX1-RV32-NEXT: vand.vv v13, v13, v12
; LMULMAX1-RV32-NEXT: vor.vv v11, v13, v11
; LMULMAX1-RV32-NEXT: vor.vv v10, v11, v10
; LMULMAX1-RV32-NEXT: li a6, 255
; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-RV32-NEXT: vmv.v.x v11, a6
; LMULMAX1-RV32-NEXT: vmerge.vim v11, v11, 0, v0
; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-RV32-NEXT: vsll.vi v13, v9, 8
; LMULMAX1-RV32-NEXT: vand.vv v13, v13, v11
; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-RV32-NEXT: vmv.v.x v14, a4
; LMULMAX1-RV32-NEXT: vmerge.vim v14, v14, 0, v0
; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-RV32-NEXT: vsll.vi v15, v9, 24
; LMULMAX1-RV32-NEXT: vand.vv v15, v15, v14
; LMULMAX1-RV32-NEXT: vor.vv v13, v15, v13
; LMULMAX1-RV32-NEXT: vsll.vx v15, v9, a3
; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-RV32-NEXT: vmv.v.x v16, a5
; LMULMAX1-RV32-NEXT: vmerge.vim v16, v16, 0, v0
; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; LMULMAX1-RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-RV32-NEXT: vand.vv v15, v15, v16
; LMULMAX1-RV32-NEXT: vsll.vx v9, v9, a2
; LMULMAX1-RV32-NEXT: vor.vv v9, v9, v15
@@ -506,7 +506,7 @@ define void @bswap_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
;
; LMULMAX1-RV64-LABEL: bswap_v4i64:
; LMULMAX1-RV64: # %bb.0:
; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-RV64-NEXT: addi a1, a0, 16
; LMULMAX1-RV64-NEXT: vle64.v v8, (a1)
; LMULMAX1-RV64-NEXT: vle64.v v9, (a0)
58 changes: 29 additions & 29 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv-fastcc.ll
@@ -5,7 +5,7 @@
define fastcc <4 x i8> @ret_v4i8(<4 x i8>* %p) {
; CHECK-LABEL: ret_v4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: ret
%v = load <4 x i8>, <4 x i8>* %p
@@ -15,7 +15,7 @@ define fastcc <4 x i8> @ret_v4i8(<4 x i8>* %p) {
define fastcc <4 x i32> @ret_v4i32(<4 x i32>* %p) {
; CHECK-LABEL: ret_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: ret
%v = load <4 x i32>, <4 x i32>* %p
@@ -25,7 +25,7 @@ define fastcc <4 x i32> @ret_v4i32(<4 x i32>* %p) {
define fastcc <8 x i32> @ret_v8i32(<8 x i32>* %p) {
; CHECK-LABEL: ret_v8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: ret
%v = load <8 x i32>, <8 x i32>* %p
@@ -35,13 +35,13 @@ define fastcc <8 x i32> @ret_v8i32(<8 x i32>* %p) {
define fastcc <16 x i64> @ret_v16i64(<16 x i64>* %p) {
; LMULMAX8-LABEL: ret_v16i64:
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; LMULMAX8-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; LMULMAX8-NEXT: vle64.v v8, (a0)
; LMULMAX8-NEXT: ret
;
; LMULMAX4-LABEL: ret_v16i64:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; LMULMAX4-NEXT: vle64.v v8, (a0)
; LMULMAX4-NEXT: addi a0, a0, 64
; LMULMAX4-NEXT: vle64.v v12, (a0)
@@ -53,7 +53,7 @@ define fastcc <16 x i64> @ret_v16i64(<16 x i64>* %p) {
define fastcc <8 x i1> @ret_mask_v8i1(<8 x i1>* %p) {
; CHECK-LABEL: ret_mask_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
; CHECK-NEXT: ret
%v = load <8 x i1>, <8 x i1>* %p
@@ -64,7 +64,7 @@ define fastcc <8 x i1> @ret_mask_v8i1(<8 x i1>* %p) {
; CHECK-LABEL: ret_mask_v32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
; CHECK-NEXT: ret
%v = load <32 x i1>, <32 x i1>* %p
@@ -76,15 +76,15 @@ define fastcc <32 x i1> @ret_mask_v32i1(<32 x i1>* %p) {
; LMULMAX8-LABEL: ret_split_v64i32:
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: li a1, 32
; LMULMAX8-NEXT: vsetvli zero, a1, e32, m8, ta, mu
; LMULMAX8-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; LMULMAX8-NEXT: vle32.v v8, (a0)
; LMULMAX8-NEXT: addi a0, a0, 128
; LMULMAX8-NEXT: vle32.v v16, (a0)
; LMULMAX8-NEXT: ret
;
; LMULMAX4-LABEL: ret_split_v64i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT: vle32.v v8, (a0)
; LMULMAX4-NEXT: addi a1, a0, 64
; LMULMAX4-NEXT: vle32.v v12, (a1)
@@ -103,7 +103,7 @@ define fastcc <128 x i32> @ret_split_v128i32(<128 x i32>* %x) {
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: addi a2, a1, 128
; LMULMAX8-NEXT: li a3, 32
; LMULMAX8-NEXT: vsetvli zero, a3, e32, m8, ta, mu
; LMULMAX8-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; LMULMAX8-NEXT: vle32.v v8, (a2)
; LMULMAX8-NEXT: addi a2, a1, 256
; LMULMAX8-NEXT: vle32.v v16, (a1)
Expand All @@ -122,7 +122,7 @@ define fastcc <128 x i32> @ret_split_v128i32(<128 x i32>* %x) {
; LMULMAX4-LABEL: ret_split_v128i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: addi a2, a1, 64
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT: vle32.v v8, (a2)
; LMULMAX4-NEXT: addi a2, a1, 128
; LMULMAX4-NEXT: vle32.v v12, (a2)
@@ -160,7 +160,7 @@ define fastcc <128 x i32> @ret_split_v128i32(<128 x i32>* %x) {
define fastcc <4 x i8> @ret_v8i8_param_v4i8(<4 x i8> %v) {
; CHECK-LABEL: ret_v8i8_param_v4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, 2
; CHECK-NEXT: ret
%r = add <4 x i8> %v, <i8 2, i8 2, i8 2, i8 2>
@@ -170,7 +170,7 @@ define fastcc <4 x i8> @ret_v8i8_param_v4i8(<4 x i8> %v) {
define fastcc <4 x i8> @ret_v4i8_param_v4i8_v4i8(<4 x i8> %v, <4 x i8> %w) {
; CHECK-LABEL: ret_v4i8_param_v4i8_v4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: ret
%r = add <4 x i8> %v, %w
@@ -180,7 +180,7 @@ define fastcc <4 x i8> @ret_v4i8_param_v4i8_v4i8(<4 x i8> %v, <4 x i8> %w) {
define fastcc <4 x i64> @ret_v4i64_param_v4i64_v4i64(<4 x i64> %v, <4 x i64> %w) {
; CHECK-LABEL: ret_v4i64_param_v4i64_v4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v10
; CHECK-NEXT: ret
%r = add <4 x i64> %v, %w
@@ -190,7 +190,7 @@ define fastcc <4 x i64> @ret_v4i64_param_v4i64_v4i64(<4 x i64> %v, <4 x i64> %w)
define fastcc <8 x i1> @ret_v8i1_param_v8i1_v8i1(<8 x i1> %v, <8 x i1> %w) {
; CHECK-LABEL: ret_v8i1_param_v8i1_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmxor.mm v0, v0, v8
; CHECK-NEXT: ret
%r = xor <8 x i1> %v, %w
@@ -201,7 +201,7 @@ define fastcc <8 x i1> @ret_v8i1_param_v8i1_v8i1(<8 x i1> %v, <8 x i1> %w) {
; CHECK-LABEL: ret_v32i1_param_v32i1_v32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmand.mm v0, v0, v8
; CHECK-NEXT: ret
%r = and <32 x i1> %v, %w
@@ -212,7 +212,7 @@ define fastcc <32 x i32> @ret_v32i32_param_v32i32_v32i32_v32i32_i32(<32 x i32> %
; LMULMAX8-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32:
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: li a2, 32
; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, mu
; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; LMULMAX8-NEXT: vle32.v v24, (a0)
; LMULMAX8-NEXT: vadd.vv v8, v8, v16
; LMULMAX8-NEXT: vadd.vv v8, v8, v24
@@ -221,7 +221,7 @@ define fastcc <32 x i32> @ret_v32i32_param_v32i32_v32i32_v32i32_i32(<32 x i32> %
;
; LMULMAX4-LABEL: ret_v32i32_param_v32i32_v32i32_v32i32_i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT: addi a1, a0, 64
; LMULMAX4-NEXT: vle32.v v24, (a1)
; LMULMAX4-NEXT: vle32.v v28, (a0)
@@ -293,7 +293,7 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x
; LMULMAX8-NEXT: .cfi_def_cfa s0, 0
; LMULMAX8-NEXT: andi sp, sp, -128
; LMULMAX8-NEXT: li a2, 32
; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, mu
; LMULMAX8-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; LMULMAX8-NEXT: vle32.v v24, (a0)
; LMULMAX8-NEXT: mv a3, sp
; LMULMAX8-NEXT: mv a0, sp
@@ -318,7 +318,7 @@ define fastcc <32 x i32> @ret_v32i32_call_v32i32_v32i32_v32i32_i32(<32 x i32> %x
; LMULMAX4-NEXT: addi s0, sp, 256
; LMULMAX4-NEXT: .cfi_def_cfa s0, 0
; LMULMAX4-NEXT: andi sp, sp, -128
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT: vle32.v v24, (a0)
; LMULMAX4-NEXT: addi a0, a0, 64
; LMULMAX4-NEXT: vle32.v v28, (a0)
@@ -347,15 +347,15 @@ define fastcc <32 x i32> @vector_arg_indirect_stack(i32 %0, i32 %1, i32 %2, i32
; LMULMAX8-LABEL: vector_arg_indirect_stack:
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: li a0, 32
; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu
; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; LMULMAX8-NEXT: vle32.v v16, (t2)
; LMULMAX8-NEXT: vadd.vv v8, v8, v16
; LMULMAX8-NEXT: ret
;
; LMULMAX4-LABEL: vector_arg_indirect_stack:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: addi a0, t2, 64
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT: vle32.v v16, (t2)
; LMULMAX4-NEXT: vle32.v v20, (a0)
; LMULMAX4-NEXT: vadd.vv v8, v8, v16
@@ -379,7 +379,7 @@ define fastcc <32 x i32> @pass_vector_arg_indirect_stack(<32 x i32> %x, <32 x i3
; LMULMAX8-NEXT: .cfi_def_cfa s0, 0
; LMULMAX8-NEXT: andi sp, sp, -128
; LMULMAX8-NEXT: li a0, 32
; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu
; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; LMULMAX8-NEXT: vmv.v.i v8, 0
; LMULMAX8-NEXT: mv a0, sp
; LMULMAX8-NEXT: li a1, 1
@@ -413,7 +413,7 @@ define fastcc <32 x i32> @pass_vector_arg_indirect_stack(<32 x i32> %x, <32 x i3
; LMULMAX4-NEXT: .cfi_def_cfa s0, 0
; LMULMAX4-NEXT: andi sp, sp, -128
; LMULMAX4-NEXT: addi a0, sp, 64
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT: vmv.v.i v8, 0
; LMULMAX4-NEXT: vse32.v v8, (a0)
; LMULMAX4-NEXT: mv a0, sp
@@ -447,15 +447,15 @@ define fastcc <32 x i32> @vector_arg_direct_stack(i32 %0, i32 %1, i32 %2, i32 %3
; LMULMAX8: # %bb.0:
; LMULMAX8-NEXT: li a0, 32
; LMULMAX8-NEXT: addi a1, sp, 8
; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu
; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; LMULMAX8-NEXT: vle32.v v24, (a1)
; LMULMAX8-NEXT: vadd.vv v8, v8, v16
; LMULMAX8-NEXT: vadd.vv v8, v8, v24
; LMULMAX8-NEXT: ret
;
; LMULMAX4-LABEL: vector_arg_direct_stack:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT: addi a0, sp, 8
; LMULMAX4-NEXT: vle32.v v24, (a0)
; LMULMAX4-NEXT: addi a0, sp, 72
@@ -479,7 +479,7 @@ define fastcc <32 x i32> @pass_vector_arg_direct_stack(<32 x i32> %x, <32 x i32>
; LMULMAX8-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
; LMULMAX8-NEXT: .cfi_offset ra, -8
; LMULMAX8-NEXT: li a0, 32
; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, mu
; LMULMAX8-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; LMULMAX8-NEXT: vmv.v.i v8, 0
; LMULMAX8-NEXT: addi a0, sp, 8
; LMULMAX8-NEXT: vse32.v v8, (a0)
@@ -517,7 +517,7 @@ define fastcc <32 x i32> @pass_vector_arg_direct_stack(<32 x i32> %x, <32 x i32>
; LMULMAX4-NEXT: li a0, 13
; LMULMAX4-NEXT: sd a0, 0(sp)
; LMULMAX4-NEXT: addi a0, sp, 72
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT: vmv.v.i v8, 0
; LMULMAX4-NEXT: vse32.v v8, (a0)
; LMULMAX4-NEXT: addi a0, sp, 8
@@ -552,7 +552,7 @@ define fastcc <4 x i1> @vector_mask_arg_direct_stack(i32 %0, i32 %1, i32 %2, i32
; CHECK-LABEL: vector_mask_arg_direct_stack:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, sp, 136
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vlm.v v8, (a0)
; CHECK-NEXT: vmxor.mm v0, v0, v8
; CHECK-NEXT: ret
166 changes: 83 additions & 83 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-calling-conv.ll

Large diffs are not rendered by default.

30 changes: 15 additions & 15 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
@@ -29,7 +29,7 @@ define <2 x half> @vp_ceil_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI1_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a1)
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vfabs.v v9, v8, v0.t
@@ -71,7 +71,7 @@ define <4 x half> @vp_ceil_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI3_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a1)
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vfabs.v v9, v8, v0.t
@@ -113,7 +113,7 @@ define <8 x half> @vp_ceil_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI5_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a1)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vfabs.v v9, v8, v0.t
@@ -157,7 +157,7 @@ define <16 x half> @vp_ceil_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl) {
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI7_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI7_0)(a1)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmset.m v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
@@ -201,7 +201,7 @@ define <2 x float> @vp_ceil_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI9_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a1)
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vfabs.v v9, v8, v0.t
@@ -243,7 +243,7 @@ define <4 x float> @vp_ceil_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI11_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI11_0)(a1)
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfabs.v v9, v8, v0.t
@@ -287,7 +287,7 @@ define <8 x float> @vp_ceil_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI13_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmset.m v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
@@ -333,7 +333,7 @@ define <16 x float> @vp_ceil_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl)
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI15_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmset.m v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
@@ -377,7 +377,7 @@ define <2 x double> @vp_ceil_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI17_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI17_0)(a1)
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vfabs.v v9, v8, v0.t
@@ -421,7 +421,7 @@ define <4 x double> @vp_ceil_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI19_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI19_0)(a1)
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vmset.m v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
@@ -467,7 +467,7 @@ define <8 x double> @vp_ceil_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI21_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI21_0)(a1)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmset.m v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
@@ -513,7 +513,7 @@ define <15 x double> @vp_ceil_v15f64_unmasked(<15 x double> %va, i32 zeroext %ev
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI23_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI23_0)(a1)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmset.m v16
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v16
@@ -559,7 +559,7 @@ define <16 x double> @vp_ceil_v16f64_unmasked(<16 x double> %va, i32 zeroext %ev
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI25_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI25_0)(a1)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmset.m v16
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v16
@@ -585,7 +585,7 @@ define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroex
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v1, v0
; CHECK-NEXT: li a1, 0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: addi a2, a0, -16
; CHECK-NEXT: vslidedown.vi v2, v0, 2
; CHECK-NEXT: bltu a0, a2, .LBB26_2
@@ -645,7 +645,7 @@ define <32 x double> @vp_ceil_v32f64_unmasked(<32 x double> %va, i32 zeroext %ev
; CHECK-LABEL: vp_ceil_v32f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 0
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: addi a2, a0, -16
; CHECK-NEXT: vmset.m v1
; CHECK-NEXT: bltu a0, a2, .LBB27_2
210 changes: 105 additions & 105 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz.ll

Large diffs are not rendered by default.

118 changes: 59 additions & 59 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop.ll

Large diffs are not rendered by default.

214 changes: 107 additions & 107 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz.ll

Large diffs are not rendered by default.

8 changes: 4 additions & 4 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-elen.ll
@@ -9,7 +9,7 @@
define void @add_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; CHECK-LABEL: add_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vle32.v v9, (a1)
; CHECK-NEXT: vadd.vv v8, v8, v9
@@ -70,7 +70,7 @@ define void @add_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
define void @add_v2i32(<2 x i32>* %x, <2 x i32>* %y) {
; CHECK-LABEL: add_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vle32.v v9, (a1)
; CHECK-NEXT: vadd.vv v8, v8, v9
@@ -117,7 +117,7 @@ define void @add_v1i64(<1 x i64>* %x, <1 x i64>* %y) {
define void @fadd_v4f32(<4 x float>* %x, <4 x float>* %y) {
; CHECK-LABEL: fadd_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vle32.v v9, (a1)
; CHECK-NEXT: vfadd.vv v8, v8, v9
@@ -154,7 +154,7 @@ define void @fadd_v2f64(<2 x double>* %x, <2 x double>* %y) {
define void @fadd_v2f32(<2 x float>* %x, <2 x float>* %y) {
; CHECK-LABEL: fadd_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vle32.v v9, (a1)
; CHECK-NEXT: vfadd.vv v8, v8, v9
900 changes: 450 additions & 450 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll

Large diffs are not rendered by default.

90 changes: 45 additions & 45 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
@@ -8,7 +8,7 @@
define i1 @extractelt_v1i1(<1 x i8>* %x, i64 %idx) nounwind {
; CHECK-LABEL: extractelt_v1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
@@ -25,12 +25,12 @@ define i1 @extractelt_v1i1(<1 x i8>* %x, i64 %idx) nounwind {
define i1 @extractelt_v2i1(<2 x i8>* %x, i64 %idx) nounwind {
; CHECK-LABEL: extractelt_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -43,12 +43,12 @@ define i1 @extractelt_v2i1(<2 x i8>* %x, i64 %idx) nounwind {
define i1 @extractelt_v4i1(<4 x i8>* %x, i64 %idx) nounwind {
; CHECK-LABEL: extractelt_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -61,7 +61,7 @@ define i1 @extractelt_v4i1(<4 x i8>* %x, i64 %idx) nounwind {
define i1 @extractelt_v8i1(<8 x i8>* %x, i64 %idx) nounwind {
; RV32-LABEL: extractelt_v8i1:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT: vle8.v v8, (a0)
; RV32-NEXT: vmseq.vi v8, v8, 0
; RV32-NEXT: vmv.x.s a0, v8
@@ -71,7 +71,7 @@ define i1 @extractelt_v8i1(<8 x i8>* %x, i64 %idx) nounwind {
;
; RV64-LABEL: extractelt_v8i1:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64-NEXT: vle8.v v8, (a0)
; RV64-NEXT: vmseq.vi v8, v8, 0
; RV64-NEXT: vmv.x.s a0, v8
@@ -81,7 +81,7 @@ define i1 @extractelt_v8i1(<8 x i8>* %x, i64 %idx) nounwind {
;
; RV32ZBS-LABEL: extractelt_v8i1:
; RV32ZBS: # %bb.0:
; RV32ZBS-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV32ZBS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV32ZBS-NEXT: vle8.v v8, (a0)
; RV32ZBS-NEXT: vmseq.vi v8, v8, 0
; RV32ZBS-NEXT: vmv.x.s a0, v8
@@ -90,7 +90,7 @@ define i1 @extractelt_v8i1(<8 x i8>* %x, i64 %idx) nounwind {
;
; RV64ZBS-LABEL: extractelt_v8i1:
; RV64ZBS: # %bb.0:
; RV64ZBS-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; RV64ZBS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZBS-NEXT: vle8.v v8, (a0)
; RV64ZBS-NEXT: vmseq.vi v8, v8, 0
; RV64ZBS-NEXT: vmv.x.s a0, v8
@@ -105,42 +105,42 @@ define i1 @extractelt_v8i1(<8 x i8>* %x, i64 %idx) nounwind {
define i1 @extractelt_v16i1(<16 x i8>* %x, i64 %idx) nounwind {
; RV32-LABEL: extractelt_v16i1:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV32-NEXT: vle8.v v8, (a0)
; RV32-NEXT: vmseq.vi v8, v8, 0
; RV32-NEXT: vsetivli zero, 0, e16, mf4, ta, mu
; RV32-NEXT: vsetivli zero, 0, e16, mf4, ta, ma
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: srl a0, a0, a1
; RV32-NEXT: andi a0, a0, 1
; RV32-NEXT: ret
;
; RV64-LABEL: extractelt_v16i1:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64-NEXT: vle8.v v8, (a0)
; RV64-NEXT: vmseq.vi v8, v8, 0
; RV64-NEXT: vsetivli zero, 0, e16, mf4, ta, mu
; RV64-NEXT: vsetivli zero, 0, e16, mf4, ta, ma
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: srl a0, a0, a1
; RV64-NEXT: andi a0, a0, 1
; RV64-NEXT: ret
;
; RV32ZBS-LABEL: extractelt_v16i1:
; RV32ZBS: # %bb.0:
; RV32ZBS-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; RV32ZBS-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV32ZBS-NEXT: vle8.v v8, (a0)
; RV32ZBS-NEXT: vmseq.vi v8, v8, 0
; RV32ZBS-NEXT: vsetivli zero, 0, e16, mf4, ta, mu
; RV32ZBS-NEXT: vsetivli zero, 0, e16, mf4, ta, ma
; RV32ZBS-NEXT: vmv.x.s a0, v8
; RV32ZBS-NEXT: bext a0, a0, a1
; RV32ZBS-NEXT: ret
;
; RV64ZBS-LABEL: extractelt_v16i1:
; RV64ZBS: # %bb.0:
; RV64ZBS-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; RV64ZBS-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64ZBS-NEXT: vle8.v v8, (a0)
; RV64ZBS-NEXT: vmseq.vi v8, v8, 0
; RV64ZBS-NEXT: vsetivli zero, 0, e16, mf4, ta, mu
; RV64ZBS-NEXT: vsetivli zero, 0, e16, mf4, ta, ma
; RV64ZBS-NEXT: vmv.x.s a0, v8
; RV64ZBS-NEXT: bext a0, a0, a1
; RV64ZBS-NEXT: ret
@@ -154,10 +154,10 @@ define i1 @extractelt_v32i1(<32 x i8>* %x, i64 %idx) nounwind {
; RV32-LABEL: extractelt_v32i1:
; RV32: # %bb.0:
; RV32-NEXT: li a2, 32
; RV32-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; RV32-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; RV32-NEXT: vle8.v v8, (a0)
; RV32-NEXT: vmseq.vi v10, v8, 0
; RV32-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; RV32-NEXT: vsetivli zero, 0, e32, mf2, ta, ma
; RV32-NEXT: vmv.x.s a0, v10
; RV32-NEXT: srl a0, a0, a1
; RV32-NEXT: andi a0, a0, 1
@@ -166,10 +166,10 @@ define i1 @extractelt_v32i1(<32 x i8>* %x, i64 %idx) nounwind {
; RV64-LABEL: extractelt_v32i1:
; RV64: # %bb.0:
; RV64-NEXT: li a2, 32
; RV64-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; RV64-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; RV64-NEXT: vle8.v v8, (a0)
; RV64-NEXT: vmseq.vi v10, v8, 0
; RV64-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; RV64-NEXT: vsetivli zero, 0, e32, mf2, ta, ma
; RV64-NEXT: vmv.x.s a0, v10
; RV64-NEXT: srl a0, a0, a1
; RV64-NEXT: andi a0, a0, 1
@@ -178,21 +178,21 @@ define i1 @extractelt_v32i1(<32 x i8>* %x, i64 %idx) nounwind {
; RV32ZBS-LABEL: extractelt_v32i1:
; RV32ZBS: # %bb.0:
; RV32ZBS-NEXT: li a2, 32
; RV32ZBS-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; RV32ZBS-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; RV32ZBS-NEXT: vle8.v v8, (a0)
; RV32ZBS-NEXT: vmseq.vi v10, v8, 0
; RV32ZBS-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; RV32ZBS-NEXT: vsetivli zero, 0, e32, mf2, ta, ma
; RV32ZBS-NEXT: vmv.x.s a0, v10
; RV32ZBS-NEXT: bext a0, a0, a1
; RV32ZBS-NEXT: ret
;
; RV64ZBS-LABEL: extractelt_v32i1:
; RV64ZBS: # %bb.0:
; RV64ZBS-NEXT: li a2, 32
; RV64ZBS-NEXT: vsetvli zero, a2, e8, m2, ta, mu
; RV64ZBS-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; RV64ZBS-NEXT: vle8.v v8, (a0)
; RV64ZBS-NEXT: vmseq.vi v10, v8, 0
; RV64ZBS-NEXT: vsetivli zero, 0, e32, mf2, ta, mu
; RV64ZBS-NEXT: vsetivli zero, 0, e32, mf2, ta, ma
; RV64ZBS-NEXT: vmv.x.s a0, v10
; RV64ZBS-NEXT: bext a0, a0, a1
; RV64ZBS-NEXT: ret
@@ -206,11 +206,11 @@ define i1 @extractelt_v64i1(<64 x i8>* %x, i64 %idx) nounwind {
; RV32-LABEL: extractelt_v64i1:
; RV32: # %bb.0:
; RV32-NEXT: li a2, 64
; RV32-NEXT: vsetvli zero, a2, e8, m4, ta, mu
; RV32-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; RV32-NEXT: vle8.v v8, (a0)
; RV32-NEXT: vmseq.vi v12, v8, 0
; RV32-NEXT: srli a0, a1, 5
; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu
; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; RV32-NEXT: vslidedown.vx v8, v12, a0
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: srl a0, a0, a1
@@ -220,10 +220,10 @@ define i1 @extractelt_v64i1(<64 x i8>* %x, i64 %idx) nounwind {
; RV64-LABEL: extractelt_v64i1:
; RV64: # %bb.0:
; RV64-NEXT: li a2, 64
; RV64-NEXT: vsetvli zero, a2, e8, m4, ta, mu
; RV64-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; RV64-NEXT: vle8.v v8, (a0)
; RV64-NEXT: vmseq.vi v12, v8, 0
; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma
; RV64-NEXT: vmv.x.s a0, v12
; RV64-NEXT: srl a0, a0, a1
; RV64-NEXT: andi a0, a0, 1
@@ -232,11 +232,11 @@ define i1 @extractelt_v64i1(<64 x i8>* %x, i64 %idx) nounwind {
; RV32ZBS-LABEL: extractelt_v64i1:
; RV32ZBS: # %bb.0:
; RV32ZBS-NEXT: li a2, 64
; RV32ZBS-NEXT: vsetvli zero, a2, e8, m4, ta, mu
; RV32ZBS-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; RV32ZBS-NEXT: vle8.v v8, (a0)
; RV32ZBS-NEXT: vmseq.vi v12, v8, 0
; RV32ZBS-NEXT: srli a0, a1, 5
; RV32ZBS-NEXT: vsetivli zero, 1, e32, mf2, ta, mu
; RV32ZBS-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; RV32ZBS-NEXT: vslidedown.vx v8, v12, a0
; RV32ZBS-NEXT: vmv.x.s a0, v8
; RV32ZBS-NEXT: bext a0, a0, a1
@@ -245,10 +245,10 @@ define i1 @extractelt_v64i1(<64 x i8>* %x, i64 %idx) nounwind {
; RV64ZBS-LABEL: extractelt_v64i1:
; RV64ZBS: # %bb.0:
; RV64ZBS-NEXT: li a2, 64
; RV64ZBS-NEXT: vsetvli zero, a2, e8, m4, ta, mu
; RV64ZBS-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; RV64ZBS-NEXT: vle8.v v8, (a0)
; RV64ZBS-NEXT: vmseq.vi v12, v8, 0
; RV64ZBS-NEXT: vsetivli zero, 0, e64, m1, ta, mu
; RV64ZBS-NEXT: vsetivli zero, 0, e64, m1, ta, ma
; RV64ZBS-NEXT: vmv.x.s a0, v12
; RV64ZBS-NEXT: bext a0, a0, a1
; RV64ZBS-NEXT: ret
@@ -262,11 +262,11 @@ define i1 @extractelt_v128i1(<128 x i8>* %x, i64 %idx) nounwind {
; RV32-LABEL: extractelt_v128i1:
; RV32: # %bb.0:
; RV32-NEXT: li a2, 128
; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, mu
; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; RV32-NEXT: vle8.v v8, (a0)
; RV32-NEXT: vmseq.vi v16, v8, 0
; RV32-NEXT: srli a0, a1, 5
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vx v8, v16, a0
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: srl a0, a0, a1
@@ -276,11 +276,11 @@ define i1 @extractelt_v128i1(<128 x i8>* %x, i64 %idx) nounwind {
; RV64-LABEL: extractelt_v128i1:
; RV64: # %bb.0:
; RV64-NEXT: li a2, 128
; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, mu
; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; RV64-NEXT: vle8.v v8, (a0)
; RV64-NEXT: vmseq.vi v16, v8, 0
; RV64-NEXT: srli a0, a1, 6
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vslidedown.vx v8, v16, a0
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: srl a0, a0, a1
@@ -290,11 +290,11 @@ define i1 @extractelt_v128i1(<128 x i8>* %x, i64 %idx) nounwind {
; RV32ZBS-LABEL: extractelt_v128i1:
; RV32ZBS: # %bb.0:
; RV32ZBS-NEXT: li a2, 128
; RV32ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, mu
; RV32ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; RV32ZBS-NEXT: vle8.v v8, (a0)
; RV32ZBS-NEXT: vmseq.vi v16, v8, 0
; RV32ZBS-NEXT: srli a0, a1, 5
; RV32ZBS-NEXT: vsetivli zero, 1, e32, m1, ta, mu
; RV32ZBS-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZBS-NEXT: vslidedown.vx v8, v16, a0
; RV32ZBS-NEXT: vmv.x.s a0, v8
; RV32ZBS-NEXT: bext a0, a0, a1
@@ -303,11 +303,11 @@ define i1 @extractelt_v128i1(<128 x i8>* %x, i64 %idx) nounwind {
; RV64ZBS-LABEL: extractelt_v128i1:
; RV64ZBS: # %bb.0:
; RV64ZBS-NEXT: li a2, 128
; RV64ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, mu
; RV64ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; RV64ZBS-NEXT: vle8.v v8, (a0)
; RV64ZBS-NEXT: vmseq.vi v16, v8, 0
; RV64ZBS-NEXT: srli a0, a1, 6
; RV64ZBS-NEXT: vsetivli zero, 1, e64, m1, ta, mu
; RV64ZBS-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64ZBS-NEXT: vslidedown.vx v8, v16, a0
; RV64ZBS-NEXT: vmv.x.s a0, v8
; RV64ZBS-NEXT: bext a0, a0, a1
@@ -328,7 +328,7 @@ define i1 @extractelt_v256i1(<256 x i8>* %x, i64 %idx) nounwind {
; RV32-NEXT: andi sp, sp, -128
; RV32-NEXT: andi a1, a1, 255
; RV32-NEXT: li a2, 128
; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, mu
; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; RV32-NEXT: addi a2, a0, 128
; RV32-NEXT: vle8.v v16, (a2)
; RV32-NEXT: vle8.v v24, (a0)
@@ -359,7 +359,7 @@ define i1 @extractelt_v256i1(<256 x i8>* %x, i64 %idx) nounwind {
; RV64-NEXT: andi sp, sp, -128
; RV64-NEXT: andi a1, a1, 255
; RV64-NEXT: li a2, 128
; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, mu
; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; RV64-NEXT: addi a2, a0, 128
; RV64-NEXT: vle8.v v16, (a2)
; RV64-NEXT: vle8.v v24, (a0)
@@ -390,7 +390,7 @@ define i1 @extractelt_v256i1(<256 x i8>* %x, i64 %idx) nounwind {
; RV32ZBS-NEXT: andi sp, sp, -128
; RV32ZBS-NEXT: andi a1, a1, 255
; RV32ZBS-NEXT: li a2, 128
; RV32ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, mu
; RV32ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; RV32ZBS-NEXT: addi a2, a0, 128
; RV32ZBS-NEXT: vle8.v v16, (a2)
; RV32ZBS-NEXT: vle8.v v24, (a0)
@@ -421,7 +421,7 @@ define i1 @extractelt_v256i1(<256 x i8>* %x, i64 %idx) nounwind {
; RV64ZBS-NEXT: andi sp, sp, -128
; RV64ZBS-NEXT: andi a1, a1, 255
; RV64ZBS-NEXT: li a2, 128
; RV64ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, mu
; RV64ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; RV64ZBS-NEXT: addi a2, a0, 128
; RV64ZBS-NEXT: vle8.v v16, (a2)
; RV64ZBS-NEXT: vle8.v v24, (a0)
250 changes: 125 additions & 125 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll

Large diffs are not rendered by default.

220 changes: 110 additions & 110 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll

Large diffs are not rendered by default.

30 changes: 15 additions & 15 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
@@ -29,7 +29,7 @@ define <2 x half> @vp_floor_v2f16_unmasked(<2 x half> %va, i32 zeroext %evl) {
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI1_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI1_0)(a1)
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vfabs.v v9, v8, v0.t
@@ -71,7 +71,7 @@ define <4 x half> @vp_floor_v4f16_unmasked(<4 x half> %va, i32 zeroext %evl) {
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI3_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI3_0)(a1)
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vfabs.v v9, v8, v0.t
@@ -113,7 +113,7 @@ define <8 x half> @vp_floor_v8f16_unmasked(<8 x half> %va, i32 zeroext %evl) {
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI5_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI5_0)(a1)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vfabs.v v9, v8, v0.t
@@ -157,7 +157,7 @@ define <16 x half> @vp_floor_v16f16_unmasked(<16 x half> %va, i32 zeroext %evl)
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI7_0)
; CHECK-NEXT: flh ft0, %lo(.LCPI7_0)(a1)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmset.m v10
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
@@ -201,7 +201,7 @@ define <2 x float> @vp_floor_v2f32_unmasked(<2 x float> %va, i32 zeroext %evl) {
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI9_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI9_0)(a1)
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vfabs.v v9, v8, v0.t
@@ -243,7 +243,7 @@ define <4 x float> @vp_floor_v4f32_unmasked(<4 x float> %va, i32 zeroext %evl) {
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI11_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI11_0)(a1)
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfabs.v v9, v8, v0.t
@@ -287,7 +287,7 @@ define <8 x float> @vp_floor_v8f32_unmasked(<8 x float> %va, i32 zeroext %evl) {
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI13_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI13_0)(a1)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmset.m v10
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
@@ -333,7 +333,7 @@ define <16 x float> @vp_floor_v16f32_unmasked(<16 x float> %va, i32 zeroext %evl
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI15_0)
; CHECK-NEXT: flw ft0, %lo(.LCPI15_0)(a1)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmset.m v12
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
@@ -377,7 +377,7 @@ define <2 x double> @vp_floor_v2f64_unmasked(<2 x double> %va, i32 zeroext %evl)
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI17_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI17_0)(a1)
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vmset.m v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vfabs.v v9, v8, v0.t
@@ -421,7 +421,7 @@ define <4 x double> @vp_floor_v4f64_unmasked(<4 x double> %va, i32 zeroext %evl)
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI19_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI19_0)(a1)
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vmset.m v10
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
@@ -467,7 +467,7 @@ define <8 x double> @vp_floor_v8f64_unmasked(<8 x double> %va, i32 zeroext %evl)
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI21_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI21_0)(a1)
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmset.m v12
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
@@ -513,7 +513,7 @@ define <15 x double> @vp_floor_v15f64_unmasked(<15 x double> %va, i32 zeroext %e
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI23_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI23_0)(a1)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmset.m v16
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v16
@@ -559,7 +559,7 @@ define <16 x double> @vp_floor_v16f64_unmasked(<16 x double> %va, i32 zeroext %e
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI25_0)
; CHECK-NEXT: fld ft0, %lo(.LCPI25_0)(a1)
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmset.m v16
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v16
@@ -585,7 +585,7 @@ define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v1, v0
; CHECK-NEXT: li a1, 0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, mu
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: addi a2, a0, -16
; CHECK-NEXT: vslidedown.vi v2, v0, 2
; CHECK-NEXT: bltu a0, a2, .LBB26_2
@@ -645,7 +645,7 @@ define <32 x double> @vp_floor_v32f64_unmasked(<32 x double> %va, i32 zeroext %e
; CHECK-LABEL: vp_floor_v32f64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 0
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: addi a2, a0, -16
; CHECK-NEXT: vmset.m v1
; CHECK-NEXT: bltu a0, a2, .LBB27_2