305 changes: 158 additions & 147 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll

Large diffs are not rendered by default.

24 changes: 12 additions & 12 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll
@@ -390,23 +390,23 @@ declare <32 x double> @llvm.vp.uitofp.v32f64.v32i64(<32 x i64>, <32 x i1>, i32)
define <32 x double> @vuitofp_v32f64_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_v32f64_v32i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: vslidedown.vi v24, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB25_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a2, a0, a1
; CHECK-NEXT: addi a2, a2, -1
; CHECK-NEXT: and a1, a2, a1
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: vfcvt.f.xu.v v16, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB25_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB25_2:
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vfcvt.f.xu.v v16, v16, v0.t
; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.uitofp.v32f64.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl)
ret <32 x double> %v
48 changes: 24 additions & 24 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
@@ -1528,47 +1528,47 @@ declare <32 x i64> @llvm.vp.add.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32)
define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vadd_vx_v32i64:
; RV32: # %bb.0:
; RV32-NEXT: vmv1r.v v1, v0
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v1, v0, 2
; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV32-NEXT: li a2, 16
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB108_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: addi a1, a0, -16
; RV32-NEXT: sltu a2, a0, a1
; RV32-NEXT: addi a2, a2, -1
; RV32-NEXT: and a1, a2, a1
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: li a1, 16
; RV32-NEXT: vadd.vv v16, v16, v24, v0.t
; RV32-NEXT: bltu a0, a1, .LBB108_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a0, 16
; RV32-NEXT: .LBB108_2:
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v24, v0.t
; RV32-NEXT: addi a1, a0, -16
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: vadd.vv v16, v16, v24, v0.t
; RV32-NEXT: vadd.vv v8, v8, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_v32i64:
; RV64: # %bb.0:
; RV64-NEXT: vmv1r.v v24, v0
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB108_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: vslidedown.vi v0, v0, 2
; RV64-NEXT: addi a1, a0, -16
; RV64-NEXT: sltu a2, a0, a1
; RV64-NEXT: addi a2, a2, -1
; RV64-NEXT: and a1, a2, a1
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: li a1, 16
; RV64-NEXT: vadd.vi v16, v16, -1, v0.t
; RV64-NEXT: bltu a0, a1, .LBB108_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a0, 16
; RV64-NEXT: .LBB108_2:
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vadd.vi v8, v8, -1, v0.t
; RV64-NEXT: addi a1, a0, -16
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vadd.vi v16, v16, -1, v0.t
; RV64-NEXT: vadd.vi v8, v8, -1, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
%vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
51 changes: 30 additions & 21 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll
@@ -324,37 +324,46 @@ define <32 x double> @vfsgnj_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vmv1r.v v1, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v1, v0, 2
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vle64.v v8, (a1)
; CHECK-NEXT: addi a1, a2, -16
; CHECK-NEXT: sltu a3, a2, a1
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a1, a3, a1
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: bltu a2, a1, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: vfsgnj.vv v16, v16, v8, v0.t
; CHECK-NEXT: bltu a2, a0, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: .LBB26_2:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v24, v0.t
; CHECK-NEXT: addi a0, a2, -16
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v1
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfsgnj.vv v16, v16, v24, v0.t
; CHECK-NEXT: vfsgnj.vv v8, v8, v24, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
24 changes: 12 additions & 12 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll
@@ -405,23 +405,23 @@ declare <32 x double> @llvm.vp.fabs.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vfabs_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v32f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: vslidedown.vi v24, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a2, a0, a1
; CHECK-NEXT: addi a2, a2, -1
; CHECK-NEXT: and a1, a2, a1
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: vfabs.v v16, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB26_2:
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v8, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vfabs.v v16, v16, v0.t
; CHECK-NEXT: vfabs.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.fabs.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
ret <32 x double> %v
144 changes: 72 additions & 72 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfcmp-constrained-sdnode.ll
@@ -3271,9 +3271,9 @@ define <32 x i1> @fcmp_ogt_vf_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v12, v16, v12
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v17, v12, fa0
; CHECK-NEXT: vmand.mm v12, v17, v16
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmfgt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v12
@@ -3290,9 +3290,9 @@ define <32 x i1> @fcmp_ogt_fv_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v12, v12, v16
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v17, v12, fa0
; CHECK-NEXT: vmand.mm v12, v16, v17
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmflt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v12
@@ -3325,9 +3325,9 @@ define <32 x i1> @fcmp_oge_vf_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v12, v16, v12
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v17, v12, fa0
; CHECK-NEXT: vmand.mm v12, v17, v16
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmfge.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v12
@@ -3344,9 +3344,9 @@ define <32 x i1> @fcmp_oge_fv_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v12, v12, v16
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v17, v12, fa0
; CHECK-NEXT: vmand.mm v12, v16, v17
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmfle.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v12
@@ -3379,9 +3379,9 @@ define <32 x i1> @fcmp_olt_vf_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v12, v12, v16
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v17, v12, fa0
; CHECK-NEXT: vmand.mm v12, v16, v17
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmflt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v12
@@ -3398,9 +3398,9 @@ define <32 x i1> @fcmp_olt_fv_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v12, v16, v12
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v17, v12, fa0
; CHECK-NEXT: vmand.mm v12, v17, v16
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmfgt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v12
@@ -3433,9 +3433,9 @@ define <32 x i1> @fcmp_ole_vf_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v12, v12, v16
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v17, v12, fa0
; CHECK-NEXT: vmand.mm v12, v16, v17
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmfle.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v12
@@ -3452,9 +3452,9 @@ define <32 x i1> @fcmp_ole_fv_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v12, v16, v12
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v17, v12, fa0
; CHECK-NEXT: vmand.mm v12, v17, v16
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmfge.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v12
@@ -3489,9 +3489,9 @@ define <32 x i1> @fcmp_one_vf_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v12, v12, v16
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v17, v12, fa0
; CHECK-NEXT: vmand.mm v12, v16, v17
; CHECK-NEXT: vmv1r.v v13, v12
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmflt.vf v13, v8, fa0, v0.t
@@ -3510,9 +3510,9 @@ define <32 x i1> @fcmp_one_fv_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v12, v16, v12
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v17, v12, fa0
; CHECK-NEXT: vmand.mm v12, v17, v16
; CHECK-NEXT: vmv1r.v v13, v12
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmfgt.vf v13, v8, fa0, v0.t
@@ -3544,9 +3544,9 @@ define <32 x i1> @fcmp_ord_vf_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v0, v12, v16
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v8, v12, fa0
; CHECK-NEXT: vmand.mm v0, v16, v8
; CHECK-NEXT: ret
%head = insertelement <32 x half> poison, half %b, i32 0
%splat = shufflevector <32 x half> %head, <32 x half> poison, <32 x i32> zeroinitializer
@@ -3560,9 +3560,9 @@ define <32 x i1> @fcmp_ord_fv_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v0, v16, v12
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v8, v12, fa0
; CHECK-NEXT: vmand.mm v0, v8, v16
; CHECK-NEXT: ret
%head = insertelement <32 x half> poison, half %b, i32 0
%splat = shufflevector <32 x half> %head, <32 x half> poison, <32 x i32> zeroinitializer
@@ -3594,9 +3594,9 @@ define <32 x i1> @fcmp_ueq_vf_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v12, v12, v16
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v17, v12, fa0
; CHECK-NEXT: vmand.mm v12, v16, v17
; CHECK-NEXT: vmv1r.v v13, v12
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmflt.vf v13, v8, fa0, v0.t
@@ -3615,9 +3615,9 @@ define <32 x i1> @fcmp_ueq_fv_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v12, v16, v12
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v17, v12, fa0
; CHECK-NEXT: vmand.mm v12, v17, v16
; CHECK-NEXT: vmv1r.v v13, v12
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmfgt.vf v13, v8, fa0, v0.t
@@ -3652,9 +3652,9 @@ define <32 x i1> @fcmp_ugt_vf_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v12, v12, v16
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v17, v12, fa0
; CHECK-NEXT: vmand.mm v12, v16, v17
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmfle.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmnot.m v0, v12
@@ -3671,9 +3671,9 @@ define <32 x i1> @fcmp_ugt_fv_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v12, v16, v12
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v17, v12, fa0
; CHECK-NEXT: vmand.mm v12, v17, v16
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmfge.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmnot.m v0, v12
@@ -3706,9 +3706,9 @@ define <32 x i1> @fcmp_uge_vf_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v12, v12, v16
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v17, v12, fa0
; CHECK-NEXT: vmand.mm v12, v16, v17
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmflt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmnot.m v0, v12
@@ -3725,9 +3725,9 @@ define <32 x i1> @fcmp_uge_fv_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v12, v16, v12
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v17, v12, fa0
; CHECK-NEXT: vmand.mm v12, v17, v16
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmfgt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmnot.m v0, v12
@@ -3760,9 +3760,9 @@ define <32 x i1> @fcmp_ult_vf_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v12, v16, v12
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v17, v12, fa0
; CHECK-NEXT: vmand.mm v12, v17, v16
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmfge.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmnot.m v0, v12
@@ -3779,9 +3779,9 @@ define <32 x i1> @fcmp_ult_fv_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v12, v12, v16
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v17, v12, fa0
; CHECK-NEXT: vmand.mm v12, v16, v17
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmfle.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmnot.m v0, v12
@@ -3814,9 +3814,9 @@ define <32 x i1> @fcmp_ule_vf_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v12, v16, v12
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v17, v12, fa0
; CHECK-NEXT: vmand.mm v12, v17, v16
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmfgt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmnot.m v0, v12
@@ -3833,9 +3833,9 @@ define <32 x i1> @fcmp_ule_fv_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfeq.vf v16, v12, fa0
; CHECK-NEXT: vmfeq.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v12, v12, v16
; CHECK-NEXT: vmfeq.vv v16, v8, v8
; CHECK-NEXT: vmfeq.vf v17, v12, fa0
; CHECK-NEXT: vmand.mm v12, v16, v17
; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vmflt.vf v12, v8, fa0, v0.t
; CHECK-NEXT: vmnot.m v0, v12
@@ -3902,9 +3902,9 @@ define <32 x i1> @fcmp_uno_vf_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfne.vf v16, v12, fa0
; CHECK-NEXT: vmfne.vv v12, v8, v8
; CHECK-NEXT: vmor.mm v0, v12, v16
; CHECK-NEXT: vmfne.vv v16, v8, v8
; CHECK-NEXT: vmfne.vf v8, v12, fa0
; CHECK-NEXT: vmor.mm v0, v16, v8
; CHECK-NEXT: ret
%head = insertelement <32 x half> poison, half %b, i32 0
%splat = shufflevector <32 x half> %head, <32 x half> poison, <32 x i32> zeroinitializer
@@ -3918,9 +3918,9 @@ define <32 x i1> @fcmp_uno_fv_v32f16(<32 x half> %va, half %b) nounwind strictfp
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfne.vf v16, v12, fa0
; CHECK-NEXT: vmfne.vv v12, v8, v8
; CHECK-NEXT: vmor.mm v0, v16, v12
; CHECK-NEXT: vmfne.vv v16, v8, v8
; CHECK-NEXT: vmfne.vf v8, v12, fa0
; CHECK-NEXT: vmor.mm v0, v8, v16
; CHECK-NEXT: ret
%head = insertelement <32 x half> poison, half %b, i32 0
%splat = shufflevector <32 x half> %head, <32 x half> poison, <32 x i32> zeroinitializer
@@ -2918,9 +2918,9 @@ define <32 x i1> @fcmps_ord_vf_v32f16(<32 x half> %va, half %b) nounwind strictf
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfle.vf v16, v12, fa0
; CHECK-NEXT: vmfle.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v0, v12, v16
; CHECK-NEXT: vmfle.vv v16, v8, v8
; CHECK-NEXT: vmfle.vf v8, v12, fa0
; CHECK-NEXT: vmand.mm v0, v16, v8
; CHECK-NEXT: ret
%head = insertelement <32 x half> poison, half %b, i32 0
%splat = shufflevector <32 x half> %head, <32 x half> poison, <32 x i32> zeroinitializer
@@ -2934,9 +2934,9 @@ define <32 x i1> @fcmps_ord_fv_v32f16(<32 x half> %va, half %b) nounwind strictf
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfle.vf v16, v12, fa0
; CHECK-NEXT: vmfle.vv v12, v8, v8
; CHECK-NEXT: vmand.mm v0, v16, v12
; CHECK-NEXT: vmfle.vv v16, v8, v8
; CHECK-NEXT: vmfle.vf v8, v12, fa0
; CHECK-NEXT: vmand.mm v0, v8, v16
; CHECK-NEXT: ret
%head = insertelement <32 x half> poison, half %b, i32 0
%splat = shufflevector <32 x half> %head, <32 x half> poison, <32 x i32> zeroinitializer
@@ -3210,10 +3210,10 @@ define <32 x i1> @fcmps_uno_vf_v32f16(<32 x half> %va, half %b) nounwind strictf
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfmv.v.f v12, fa0
; CHECK-NEXT: vmfle.vf v16, v12, fa0
; CHECK-NEXT: vmfle.vv v12, v8, v8
; CHECK-NEXT: vmnot.m v8, v12
; CHECK-NEXT: vmorn.mm v0, v8, v16
; CHECK-NEXT: vmfle.vv v16, v8, v8
; CHECK-NEXT: vmnot.m v8, v16
; CHECK-NEXT: vmfle.vf v9, v12, fa0
; CHECK-NEXT: vmorn.mm v0, v8, v9
; CHECK-NEXT: ret
%head = insertelement <32 x half> poison, half %b, i32 0
%splat = shufflevector <32 x half> %head, <32 x half> poison, <32 x i32> zeroinitializer
80 changes: 43 additions & 37 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
@@ -896,70 +896,76 @@ define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x
; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
; CHECK-NEXT: addi a1, a2, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
; CHECK-NEXT: vmv1r.v v1, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a3, 24
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vle64.v v24, (a1)
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a2)
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v1, v0, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a2)
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: addi a1, a2, 128
; CHECK-NEXT: addi a2, a4, -16
; CHECK-NEXT: sltu a3, a4, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
; CHECK-NEXT: addi a3, a0, 128
; CHECK-NEXT: vle64.v v16, (a1)
; CHECK-NEXT: vle64.v v8, (a3)
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: mv a0, a4
; CHECK-NEXT: bltu a4, a1, .LBB50_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB50_2:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmadd.vv v24, v8, v16, v0.t
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, a4, -16
; CHECK-NEXT: sltu a1, a4, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: bltu a4, a0, .LBB50_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a4, 16
; CHECK-NEXT: .LBB50_2:
; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v1
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmadd.vv v16, v24, v8, v0.t
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add sp, sp, a0
51 changes: 30 additions & 21 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
@@ -416,37 +416,46 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vmv1r.v v1, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v1, v0, 2
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vle64.v v8, (a1)
; CHECK-NEXT: addi a1, a2, -16
; CHECK-NEXT: sltu a3, a2, a1
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a1, a3, a1
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: bltu a2, a1, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: vfmax.vv v16, v16, v8, v0.t
; CHECK-NEXT: bltu a2, a0, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: .LBB26_2:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v24, v0.t
; CHECK-NEXT: addi a0, a2, -16
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v1
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmax.vv v16, v16, v24, v0.t
; CHECK-NEXT: vfmax.vv v8, v8, v24, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
51 changes: 30 additions & 21 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
@@ -416,37 +416,46 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: vmv1r.v v1, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v1, v0, 2
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vle64.v v8, (a1)
; CHECK-NEXT: addi a1, a2, -16
; CHECK-NEXT: sltu a3, a2, a1
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a1, a3, a1
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: bltu a2, a1, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: vfmin.vv v16, v16, v8, v0.t
; CHECK-NEXT: bltu a2, a0, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: .LBB26_2:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v24, v0.t
; CHECK-NEXT: addi a0, a2, -16
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v1
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmin.vv v16, v16, v24, v0.t
; CHECK-NEXT: vfmin.vv v8, v8, v24, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
80 changes: 43 additions & 37 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll
@@ -660,70 +660,76 @@ define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x
; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
; CHECK-NEXT: addi a1, a2, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
; CHECK-NEXT: vmv1r.v v1, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a3, 24
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vle64.v v24, (a1)
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a2)
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v1, v0, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a2)
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: addi a1, a2, 128
; CHECK-NEXT: addi a2, a4, -16
; CHECK-NEXT: sltu a3, a4, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
; CHECK-NEXT: addi a3, a0, 128
; CHECK-NEXT: vle64.v v16, (a1)
; CHECK-NEXT: vle64.v v8, (a3)
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: mv a0, a4
; CHECK-NEXT: bltu a4, a1, .LBB50_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB50_2:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmadd.vv v24, v8, v16, v0.t
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, a4, -16
; CHECK-NEXT: sltu a1, a4, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: bltu a4, a0, .LBB50_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a4, 16
; CHECK-NEXT: .LBB50_2:
; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v1
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmadd.vv v16, v24, v8, v0.t
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add sp, sp, a0
24 changes: 12 additions & 12 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
@@ -405,23 +405,23 @@ declare <32 x double> @llvm.vp.fneg.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vfneg_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v32f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: vslidedown.vi v24, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a2, a0, a1
; CHECK-NEXT: addi a2, a2, -1
; CHECK-NEXT: and a1, a2, a1
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: vfneg.v v16, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB26_2:
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vfneg.v v8, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vfneg.v v16, v16, v0.t
; CHECK-NEXT: vfneg.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.fneg.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
ret <32 x double> %v
24 changes: 12 additions & 12 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll
@@ -405,23 +405,23 @@ declare <32 x double> @llvm.vp.sqrt.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vfsqrt_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsqrt_vv_v32f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: vslidedown.vi v24, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a2, a0, a1
; CHECK-NEXT: addi a2, a2, -1
; CHECK-NEXT: and a1, a2, a1
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: vfsqrt.v v16, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB26_2:
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vfsqrt.v v16, v16, v0.t
; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.sqrt.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
ret <32 x double> %v
46 changes: 23 additions & 23 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
@@ -1091,48 +1091,48 @@ declare <32 x i64> @llvm.vp.smax.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32)
define <32 x i64> @vmax_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmax_vx_v32i64:
; RV32: # %bb.0:
; RV32-NEXT: vmv1r.v v1, v0
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v1, v0, 2
; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV32-NEXT: li a2, 16
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB74_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: addi a1, a0, -16
; RV32-NEXT: sltu a2, a0, a1
; RV32-NEXT: addi a2, a2, -1
; RV32-NEXT: and a1, a2, a1
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: li a1, 16
; RV32-NEXT: vmax.vv v16, v16, v24, v0.t
; RV32-NEXT: bltu a0, a1, .LBB74_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a0, 16
; RV32-NEXT: .LBB74_2:
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vmax.vv v8, v8, v24, v0.t
; RV32-NEXT: addi a1, a0, -16
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: vmax.vv v16, v16, v24, v0.t
; RV32-NEXT: vmax.vv v8, v8, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vmax_vx_v32i64:
; RV64: # %bb.0:
; RV64-NEXT: vmv1r.v v24, v0
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
; RV64-NEXT: addi a1, a0, -16
; RV64-NEXT: sltu a2, a0, a1
; RV64-NEXT: addi a2, a2, -1
; RV64-NEXT: and a2, a2, a1
; RV64-NEXT: li a1, -1
; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: li a2, 16
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: vmax.vx v16, v16, a1, v0.t
; RV64-NEXT: bltu a0, a2, .LBB74_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a1, 16
; RV64-NEXT: li a0, 16
; RV64-NEXT: .LBB74_2:
; RV64-NEXT: li a2, -1
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmax.vx v8, v8, a2, v0.t
; RV64-NEXT: addi a1, a0, -16
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vmax.vx v16, v16, a2, v0.t
; RV64-NEXT: vmax.vx v8, v8, a1, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
%vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
46 changes: 23 additions & 23 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
@@ -1090,48 +1090,48 @@ declare <32 x i64> @llvm.vp.umax.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32)
define <32 x i64> @vmaxu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmaxu_vx_v32i64:
; RV32: # %bb.0:
; RV32-NEXT: vmv1r.v v1, v0
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v1, v0, 2
; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV32-NEXT: li a2, 16
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB74_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: addi a1, a0, -16
; RV32-NEXT: sltu a2, a0, a1
; RV32-NEXT: addi a2, a2, -1
; RV32-NEXT: and a1, a2, a1
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: li a1, 16
; RV32-NEXT: vmaxu.vv v16, v16, v24, v0.t
; RV32-NEXT: bltu a0, a1, .LBB74_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a0, 16
; RV32-NEXT: .LBB74_2:
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vmaxu.vv v8, v8, v24, v0.t
; RV32-NEXT: addi a1, a0, -16
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: vmaxu.vv v16, v16, v24, v0.t
; RV32-NEXT: vmaxu.vv v8, v8, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vmaxu_vx_v32i64:
; RV64: # %bb.0:
; RV64-NEXT: vmv1r.v v24, v0
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
; RV64-NEXT: addi a1, a0, -16
; RV64-NEXT: sltu a2, a0, a1
; RV64-NEXT: addi a2, a2, -1
; RV64-NEXT: and a2, a2, a1
; RV64-NEXT: li a1, -1
; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: li a2, 16
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: vmaxu.vx v16, v16, a1, v0.t
; RV64-NEXT: bltu a0, a2, .LBB74_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a1, 16
; RV64-NEXT: li a0, 16
; RV64-NEXT: .LBB74_2:
; RV64-NEXT: li a2, -1
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmaxu.vx v8, v8, a2, v0.t
; RV64-NEXT: addi a1, a0, -16
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vmaxu.vx v16, v16, a2, v0.t
; RV64-NEXT: vmaxu.vx v8, v8, a1, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
%vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
46 changes: 23 additions & 23 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
@@ -1091,48 +1091,48 @@ declare <32 x i64> @llvm.vp.smin.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32)
define <32 x i64> @vmin_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmin_vx_v32i64:
; RV32: # %bb.0:
; RV32-NEXT: vmv1r.v v1, v0
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v1, v0, 2
; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV32-NEXT: li a2, 16
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB74_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: addi a1, a0, -16
; RV32-NEXT: sltu a2, a0, a1
; RV32-NEXT: addi a2, a2, -1
; RV32-NEXT: and a1, a2, a1
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: li a1, 16
; RV32-NEXT: vmin.vv v16, v16, v24, v0.t
; RV32-NEXT: bltu a0, a1, .LBB74_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a0, 16
; RV32-NEXT: .LBB74_2:
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vmin.vv v8, v8, v24, v0.t
; RV32-NEXT: addi a1, a0, -16
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: vmin.vv v16, v16, v24, v0.t
; RV32-NEXT: vmin.vv v8, v8, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vmin_vx_v32i64:
; RV64: # %bb.0:
; RV64-NEXT: vmv1r.v v24, v0
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
; RV64-NEXT: addi a1, a0, -16
; RV64-NEXT: sltu a2, a0, a1
; RV64-NEXT: addi a2, a2, -1
; RV64-NEXT: and a2, a2, a1
; RV64-NEXT: li a1, -1
; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: li a2, 16
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: vmin.vx v16, v16, a1, v0.t
; RV64-NEXT: bltu a0, a2, .LBB74_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a1, 16
; RV64-NEXT: li a0, 16
; RV64-NEXT: .LBB74_2:
; RV64-NEXT: li a2, -1
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmin.vx v8, v8, a2, v0.t
; RV64-NEXT: addi a1, a0, -16
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vmin.vx v16, v16, a2, v0.t
; RV64-NEXT: vmin.vx v8, v8, a1, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
%vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
46 changes: 23 additions & 23 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
@@ -1090,48 +1090,48 @@ declare <32 x i64> @llvm.vp.umin.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32)
define <32 x i64> @vminu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vminu_vx_v32i64:
; RV32: # %bb.0:
; RV32-NEXT: vmv1r.v v1, v0
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v1, v0, 2
; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV32-NEXT: li a2, 16
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB74_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: addi a1, a0, -16
; RV32-NEXT: sltu a2, a0, a1
; RV32-NEXT: addi a2, a2, -1
; RV32-NEXT: and a1, a2, a1
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: li a1, 16
; RV32-NEXT: vminu.vv v16, v16, v24, v0.t
; RV32-NEXT: bltu a0, a1, .LBB74_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a0, 16
; RV32-NEXT: .LBB74_2:
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vminu.vv v8, v8, v24, v0.t
; RV32-NEXT: addi a1, a0, -16
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: vminu.vv v16, v16, v24, v0.t
; RV32-NEXT: vminu.vv v8, v8, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vminu_vx_v32i64:
; RV64: # %bb.0:
; RV64-NEXT: vmv1r.v v24, v0
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
; RV64-NEXT: addi a1, a0, -16
; RV64-NEXT: sltu a2, a0, a1
; RV64-NEXT: addi a2, a2, -1
; RV64-NEXT: and a2, a2, a1
; RV64-NEXT: li a1, -1
; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: li a2, 16
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: vminu.vx v16, v16, a1, v0.t
; RV64-NEXT: bltu a0, a2, .LBB74_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a1, 16
; RV64-NEXT: li a0, 16
; RV64-NEXT: .LBB74_2:
; RV64-NEXT: li a2, -1
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vminu.vx v8, v8, a2, v0.t
; RV64-NEXT: addi a1, a0, -16
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vminu.vx v16, v16, a2, v0.t
; RV64-NEXT: vminu.vx v8, v8, a1, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
%vb = shufflevector <32 x i64> %elt.head, <32 x i64> poison, <32 x i32> zeroinitializer
637 changes: 334 additions & 303 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll

Large diffs are not rendered by default.

78 changes: 39 additions & 39 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
@@ -381,23 +381,24 @@ declare <32 x double> @llvm.vp.load.v32f64.p0(ptr, <32 x i1>, i32)
define <32 x double> @vpload_v32f64(ptr %ptr, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v32f64:
; CHECK: # %bb.0:
; CHECK-NEXT: li a3, 16
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: bltu a1, a3, .LBB31_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: .LBB31_2:
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: addi a2, a1, -16
; CHECK-NEXT: sltu a1, a1, a2
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a1, a1, a2
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: sltu a3, a1, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: addi a3, a0, 128
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a3), v0.t
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: bltu a1, a2, .LBB31_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB31_2:
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a0), v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <32 x double> @llvm.vp.load.v32f64.p0(ptr %ptr, <32 x i1> %m, i32 %evl)
ret <32 x double> %load
@@ -410,50 +410,49 @@ declare <33 x double> @llvm.vp.load.v33f64.p0(ptr, <33 x i1>, i32)
define <33 x double> @vpload_v33f64(ptr %ptr, <33 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v33f64:
; CHECK: # %bb.0:
; CHECK-NEXT: li a4, 32
; CHECK-NEXT: addi a3, a2, -32
; CHECK-NEXT: sltu a4, a2, a3
; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: and a4, a4, a3
; CHECK-NEXT: li a3, 16
; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: mv a3, a2
; CHECK-NEXT: bltu a2, a4, .LBB32_2
; CHECK-NEXT: bltu a4, a3, .LBB32_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a3, 32
; CHECK-NEXT: li a4, 16
; CHECK-NEXT: .LBB32_2:
; CHECK-NEXT: addi a4, a3, -16
; CHECK-NEXT: sltu a5, a3, a4
; CHECK-NEXT: addi a5, a5, -1
; CHECK-NEXT: and a4, a5, a4
; CHECK-NEXT: addi a5, a1, 128
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v8, 2
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v8, 4
; CHECK-NEXT: addi a5, a1, 256
; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a5), v0.t
; CHECK-NEXT: addi a4, a2, -32
; CHECK-NEXT: sltu a2, a2, a4
; CHECK-NEXT: addi a2, a2, -1
; CHECK-NEXT: and a4, a2, a4
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: bltu a4, a2, .LBB32_4
; CHECK-NEXT: li a4, 32
; CHECK-NEXT: bltu a2, a4, .LBB32_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a4, 16
; CHECK-NEXT: li a2, 32
; CHECK-NEXT: .LBB32_4:
; CHECK-NEXT: addi a5, a1, 256
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v8, 4
; CHECK-NEXT: addi a4, a2, -16
; CHECK-NEXT: sltu a5, a2, a4
; CHECK-NEXT: addi a5, a5, -1
; CHECK-NEXT: and a4, a5, a4
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v8, 2
; CHECK-NEXT: addi a5, a1, 128
; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a5), v0.t
; CHECK-NEXT: bltu a3, a2, .LBB32_6
; CHECK-NEXT: bltu a2, a3, .LBB32_6
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: li a3, 16
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: .LBB32_6:
; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vle64.v v8, (a1), v0.t
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vse64.v v16, (a1)
; CHECK-NEXT: vse64.v v24, (a1)
; CHECK-NEXT: addi a0, a0, 256
; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT: vse64.v v24, (a0)
; CHECK-NEXT: vse64.v v16, (a0)
; CHECK-NEXT: ret
%load = call <33 x double> @llvm.vp.load.v33f64.p0(ptr %ptr, <33 x i1> %m, i32 %evl)
ret <33 x double> %load
75 changes: 38 additions & 37 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
@@ -1207,41 +1207,41 @@ define <32 x double> @vpmerge_vv_v32f64(<32 x double> %va, <32 x double> %vb, <3
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
; CHECK-NEXT: vmv1r.v v1, v0
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: bltu a2, a1, .LBB79_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB79_2:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT: vmerge.vvm v24, v24, v8, v0
; CHECK-NEXT: addi a0, a2, -16
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: addi a1, a2, -16
; CHECK-NEXT: sltu a3, a2, a1
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a1, a3, a1
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle64.v v16, (a0)
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0
; CHECK-NEXT: bltu a2, a0, .LBB79_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: .LBB79_2:
; CHECK-NEXT: vsetvli zero, a2, e64, m8, tu, ma
; CHECK-NEXT: vmv1r.v v0, v1
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmerge.vvm v16, v16, v8, v0
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmerge.vvm v8, v8, v24, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
@@ -1254,22 +1254,23 @@ define <32 x double> @vpmerge_vv_v32f64(<32 x double> %va, <32 x double> %vb, <3
define <32 x double> @vpmerge_vf_v32f64(double %a, <32 x double> %vb, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpmerge_vf_v32f64:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB80_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB80_2:
; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma
; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: sltu a2, a0, a1
; CHECK-NEXT: addi a2, a2, -1
; CHECK-NEXT: and a1, a2, a1
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, ma
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: vfmerge.vfm v16, v16, fa0, v0
; CHECK-NEXT: bltu a0, a1, .LBB80_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB80_2:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vfmerge.vfm v8, v8, fa0, v0
; CHECK-NEXT: ret
%elt.head = insertelement <32 x double> poison, double %a, i32 0
%va = shufflevector <32 x double> %elt.head, <32 x double> poison, <32 x i32> zeroinitializer
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll
@@ -297,9 +297,9 @@ define void @vpstore_v32f64(<32 x double> %val, ptr %ptr, <32 x i1> %m, i32 zero
; CHECK-NEXT: sltu a1, a1, a2
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a1, a1, a2
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vse64.v v16, (a0), v0.t
; CHECK-NEXT: ret
106 changes: 64 additions & 42 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
@@ -417,35 +417,46 @@ define <32 x i64> @select_v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: addi a1, a2, -16
; CHECK-NEXT: sltu a3, a2, a1
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a1, a3, a1
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: bltu a2, a1, .LBB25_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB25_2:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0
; CHECK-NEXT: addi a0, a2, -16
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: addi a3, sp, 16
; CHECK-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: vmerge.vvm v16, v8, v16, v0
; CHECK-NEXT: bltu a2, a0, .LBB25_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: .LBB25_2:
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0
; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
@@ -574,35 +585,46 @@ define <64 x float> @select_v64f32(<64 x i1> %a, <64 x float> %b, <64 x float> %
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: addi a1, a2, -32
; CHECK-NEXT: sltu a3, a2, a1
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a1, a3, a1
; CHECK-NEXT: li a3, 32
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: vle32.v v24, (a1)
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vle32.v v24, (a0)
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: addi a4, sp, 16
; CHECK-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 4
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v16, v8, v16, v0
; CHECK-NEXT: bltu a2, a3, .LBB35_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: li a2, 32
; CHECK-NEXT: .LBB35_2:
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0
; CHECK-NEXT: addi a0, a2, -32
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 4
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0
; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
59 changes: 29 additions & 30 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll
@@ -151,26 +151,26 @@ declare <32 x i64> @llvm.vp.zext.v32i64.v32i32(<32 x i32>, <32 x i1>, i32)
define <32 x i64> @vzext_v32i64_v32i32(<32 x i32> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vzext_v32i64_v32i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v1, v0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: vslidedown.vi v16, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB12_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB12_2:
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vzext.vf2 v24, v8, v0.t
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: sltu a2, a0, a1
; CHECK-NEXT: addi a2, a2, -1
; CHECK-NEXT: and a1, a2, a1
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 16
; CHECK-NEXT: vslidedown.vi v24, v8, 16
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: vzext.vf2 v16, v24, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB12_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB12_2:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vzext.vf2 v16, v8, v0.t
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: vmv1r.v v0, v1
; CHECK-NEXT: vzext.vf2 v24, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v24
; CHECK-NEXT: ret
%v = call <32 x i64> @llvm.vp.zext.v32i64.v32i32(<32 x i32> %va, <32 x i1> %m, i32 %evl)
ret <32 x i64> %v
@@ -179,23 +179,22 @@ define <32 x i64> @vzext_v32i64_v32i32(<32 x i32> %va, <32 x i1> %m, i32 zeroext
define <32 x i64> @vzext_v32i64_v32i32_unmasked(<32 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vzext_v32i64_v32i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB13_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB13_2:
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vzext.vf2 v24, v8
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: sltu a2, a0, a1
; CHECK-NEXT: addi a2, a2, -1
; CHECK-NEXT: and a1, a2, a1
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 16
; CHECK-NEXT: vslidedown.vi v24, v8, 16
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: vzext.vf2 v16, v24
; CHECK-NEXT: bltu a0, a1, .LBB13_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB13_2:
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vzext.vf2 v16, v8
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: vzext.vf2 v24, v8
; CHECK-NEXT: vmv.v.v v8, v24
; CHECK-NEXT: ret
%v = call <32 x i64> @llvm.vp.zext.v32i64.v32i32(<32 x i32> %va, <32 x i1> shufflevector (<32 x i1> insertelement (<32 x i1> undef, i1 true, i32 0), <32 x i1> undef, <32 x i32> zeroinitializer), i32 %evl)
ret <32 x i64> %v
32 changes: 22 additions & 10 deletions llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
@@ -212,17 +212,23 @@ define <vscale x 32 x half> @vfmax_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vmv8r.v v0, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v8, v8
; ZVFHMIN-NEXT: vmfeq.vv v1, v24, v24
@@ -231,39 +237,45 @@ define <vscale x 32 x half> @vfmax_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v8, v0
; ZVFHMIN-NEXT: vfmax.vv v24, v8, v16
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v16, v16
; ZVFHMIN-NEXT: vmfeq.vv v1, v8, v8
; ZVFHMIN-NEXT: vmerge.vvm v24, v16, v8, v0
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vmv1r.v v0, v1
; ZVFHMIN-NEXT: vmerge.vvm v16, v8, v16, v0
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfmax.vv v16, v16, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
32 changes: 22 additions & 10 deletions llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
@@ -212,17 +212,23 @@ define <vscale x 32 x half> @vfmin_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vmv8r.v v0, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v8, v8
; ZVFHMIN-NEXT: vmfeq.vv v1, v24, v24
@@ -231,39 +237,45 @@ define <vscale x 32 x half> @vfmin_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v8, v0
; ZVFHMIN-NEXT: vfmin.vv v24, v8, v16
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v16, v16
; ZVFHMIN-NEXT: vmfeq.vv v1, v8, v8
; ZVFHMIN-NEXT: vmerge.vvm v24, v16, v8, v0
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vmv1r.v v0, v1
; ZVFHMIN-NEXT: vmerge.vvm v16, v8, v16, v0
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfmin.vv v16, v16, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
370 changes: 186 additions & 184 deletions llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll

Large diffs are not rendered by default.

396 changes: 178 additions & 218 deletions llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll

Large diffs are not rendered by default.

630 changes: 310 additions & 320 deletions llvm/test/CodeGen/RISCV/rvv/named-vector-shuffle-reverse.ll

Large diffs are not rendered by default.

292 changes: 154 additions & 138 deletions llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll

Large diffs are not rendered by default.

56 changes: 28 additions & 28 deletions llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll
@@ -10,17 +10,17 @@ define <2 x i64> @vec_v2i64(<2 x i64> %x, <2 x i64> %y) nounwind {
; CHECK-LABEL: vec_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vsll.vv v10, v8, v9
; CHECK-NEXT: vsra.vv v9, v10, v9
; CHECK-NEXT: vmsne.vv v9, v8, v9
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: li a0, -1
; CHECK-NEXT: srli a1, a0, 1
; CHECK-NEXT: vsll.vv v10, v8, v9
; CHECK-NEXT: vsra.vv v9, v10, v9
; CHECK-NEXT: vmsne.vv v8, v8, v9
; CHECK-NEXT: vmv.v.x v9, a1
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: slli a0, a0, 63
; CHECK-NEXT: vmerge.vxm v9, v9, a0, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmerge.vvm v8, v10, v9, v0
; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT: vmv.v.v v0, v9
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT: ret
%tmp = call <2 x i64> @llvm.sshl.sat.v2i64(<2 x i64> %x, <2 x i64> %y)
ret <2 x i64> %tmp
@@ -30,16 +30,16 @@ define <4 x i32> @vec_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; CHECK-LABEL: vec_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vsll.vv v10, v8, v9
; CHECK-NEXT: vsra.vv v9, v10, v9
; CHECK-NEXT: vmsne.vv v9, v8, v9
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: lui a0, 524288
; CHECK-NEXT: addi a1, a0, -1
; CHECK-NEXT: vsll.vv v10, v8, v9
; CHECK-NEXT: vsra.vv v9, v10, v9
; CHECK-NEXT: vmsne.vv v8, v8, v9
; CHECK-NEXT: vmv.v.x v9, a1
; CHECK-NEXT: vmerge.vxm v9, v9, a0, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmerge.vvm v8, v10, v9, v0
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT: vmv.v.v v0, v9
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT: ret
%tmp = call <4 x i32> @llvm.sshl.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
ret <4 x i32> %tmp
@@ -49,16 +49,16 @@ define <8 x i16> @vec_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
; CHECK-LABEL: vec_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vsll.vv v10, v8, v9
; CHECK-NEXT: vsra.vv v9, v10, v9
; CHECK-NEXT: vmsne.vv v9, v8, v9
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: lui a0, 8
; CHECK-NEXT: addi a1, a0, -1
; CHECK-NEXT: vsll.vv v10, v8, v9
; CHECK-NEXT: vsra.vv v9, v10, v9
; CHECK-NEXT: vmsne.vv v8, v8, v9
; CHECK-NEXT: vmv.v.x v9, a1
; CHECK-NEXT: vmerge.vxm v9, v9, a0, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmerge.vvm v8, v10, v9, v0
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT: vmv.v.v v0, v9
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT: ret
%tmp = call <8 x i16> @llvm.sshl.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
ret <8 x i16> %tmp
@@ -68,16 +68,16 @@ define <16 x i8> @vec_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
; CHECK-LABEL: vec_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: li a0, 127
; CHECK-NEXT: vsll.vv v10, v8, v9
; CHECK-NEXT: vsra.vv v9, v10, v9
; CHECK-NEXT: vmsne.vv v8, v8, v9
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vmsne.vv v9, v8, v9
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: li a0, 127
; CHECK-NEXT: vmv.v.x v8, a0
; CHECK-NEXT: li a0, 128
; CHECK-NEXT: vmerge.vxm v9, v9, a0, v0
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmerge.vvm v8, v10, v9, v0
; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT: vmv.v.v v0, v9
; CHECK-NEXT: vmerge.vvm v8, v10, v8, v0
; CHECK-NEXT: ret
%tmp = call <16 x i8> @llvm.sshl.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
ret <16 x i8> %tmp
18 changes: 9 additions & 9 deletions llvm/test/CodeGen/RISCV/rvv/stepvector.ll
@@ -501,8 +501,8 @@ define <vscale x 8 x i64> @mul_bigimm_stepvector_nxv8i64() {
; RV32-NEXT: lui a0, 797989
; RV32-NEXT: addi a0, a0, -683
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v8, (a0), zero
; RV32-NEXT: vid.v v16
; RV32-NEXT: vmul.vv v8, v16, v8
@@ -552,8 +552,8 @@ define <vscale x 16 x i64> @stepvector_nxv16i64() {
; RV32-NEXT: sw zero, 12(sp)
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vid.v v8
; RV32-NEXT: vadd.vv v16, v8, v16
Expand All @@ -562,9 +562,9 @@ define <vscale x 16 x i64> @stepvector_nxv16i64() {
;
; RV64-LABEL: stepvector_nxv16i64:
; RV64: # %bb.0:
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; RV64-NEXT: vid.v v8
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: vadd.vx v16, v8, a0
; RV64-NEXT: ret
%v = call <vscale x 16 x i64> @llvm.experimental.stepvector.nxv16i64()
@@ -627,13 +627,13 @@ define <vscale x 16 x i64> @mul_stepvector_nxv16i64() {
;
; RV64-LABEL: mul_stepvector_nxv16i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; RV64-NEXT: vid.v v8
; RV64-NEXT: li a0, 3
; RV64-NEXT: vmul.vx v8, v8, a0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a1, a0, 1
; RV64-NEXT: add a0, a1, a0
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vid.v v8
; RV64-NEXT: li a1, 3
; RV64-NEXT: vmul.vx v8, v8, a1
; RV64-NEXT: vadd.vx v16, v8, a0
; RV64-NEXT: ret
entry:
@@ -667,8 +667,8 @@ define <vscale x 16 x i64> @mul_bigimm_stepvector_nxv16i64() {
; RV32-NEXT: mulhu a0, a0, a2
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: sw a0, 4(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v8, (a0), zero
; RV32-NEXT: mv a0, sp
; RV32-NEXT: vlse64.v v16, (a0), zero
98 changes: 48 additions & 50 deletions llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
@@ -597,51 +597,49 @@ declare <vscale x 3 x double> @llvm.experimental.vp.strided.load.nxv3f64.p0.i32(
define <vscale x 16 x double> @strided_load_nxv16f64(ptr %ptr, i64 %stride, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_load_nxv16f64:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vmv1r.v v9, v0
; CHECK-RV32-NEXT: vmv1r.v v8, v0
; CHECK-RV32-NEXT: csrr a4, vlenb
; CHECK-RV32-NEXT: sub a2, a3, a4
; CHECK-RV32-NEXT: sltu a5, a3, a2
; CHECK-RV32-NEXT: addi a5, a5, -1
; CHECK-RV32-NEXT: and a2, a5, a2
; CHECK-RV32-NEXT: srli a5, a4, 3
; CHECK-RV32-NEXT: vsetvli a6, zero, e8, mf4, ta, ma
; CHECK-RV32-NEXT: vslidedown.vx v0, v0, a5
; CHECK-RV32-NEXT: bltu a3, a4, .LBB49_2
; CHECK-RV32-NEXT: # %bb.1:
; CHECK-RV32-NEXT: mv a3, a4
; CHECK-RV32-NEXT: .LBB49_2:
; CHECK-RV32-NEXT: mul a5, a3, a1
; CHECK-RV32-NEXT: add a5, a0, a5
; CHECK-RV32-NEXT: srli a4, a4, 3
; CHECK-RV32-NEXT: vsetvli a6, zero, e8, mf4, ta, ma
; CHECK-RV32-NEXT: vslidedown.vx v8, v9, a4
; CHECK-RV32-NEXT: mul a4, a3, a1
; CHECK-RV32-NEXT: add a4, a0, a4
; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV32-NEXT: vmv1r.v v0, v8
; CHECK-RV32-NEXT: vlse64.v v16, (a5), a1, v0.t
; CHECK-RV32-NEXT: vlse64.v v16, (a4), a1, v0.t
; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV32-NEXT: vmv1r.v v0, v9
; CHECK-RV32-NEXT: vmv1r.v v0, v8
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_load_nxv16f64:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vmv1r.v v9, v0
; CHECK-RV64-NEXT: vmv1r.v v8, v0
; CHECK-RV64-NEXT: csrr a4, vlenb
; CHECK-RV64-NEXT: sub a3, a2, a4
; CHECK-RV64-NEXT: sltu a5, a2, a3
; CHECK-RV64-NEXT: addi a5, a5, -1
; CHECK-RV64-NEXT: and a3, a5, a3
; CHECK-RV64-NEXT: srli a5, a4, 3
; CHECK-RV64-NEXT: vsetvli a6, zero, e8, mf4, ta, ma
; CHECK-RV64-NEXT: vslidedown.vx v0, v0, a5
; CHECK-RV64-NEXT: bltu a2, a4, .LBB49_2
; CHECK-RV64-NEXT: # %bb.1:
; CHECK-RV64-NEXT: mv a2, a4
; CHECK-RV64-NEXT: .LBB49_2:
; CHECK-RV64-NEXT: mul a5, a2, a1
; CHECK-RV64-NEXT: add a5, a0, a5
; CHECK-RV64-NEXT: srli a4, a4, 3
; CHECK-RV64-NEXT: vsetvli a6, zero, e8, mf4, ta, ma
; CHECK-RV64-NEXT: vslidedown.vx v8, v9, a4
; CHECK-RV64-NEXT: mul a4, a2, a1
; CHECK-RV64-NEXT: add a4, a0, a4
; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV64-NEXT: vmv1r.v v0, v8
; CHECK-RV64-NEXT: vlse64.v v16, (a5), a1, v0.t
; CHECK-RV64-NEXT: vlse64.v v16, (a4), a1, v0.t
; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV64-NEXT: vmv1r.v v0, v9
; CHECK-RV64-NEXT: vmv1r.v v0, v8
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%v = call <vscale x 16 x double> @llvm.experimental.vp.strided.load.nxv16f64.p0.i64(ptr %ptr, i64 %stride, <vscale x 16 x i1> %mask, i32 %evl)
Expand Down Expand Up @@ -704,25 +702,25 @@ define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV32-NEXT: csrr a2, vlenb
; CHECK-RV32-NEXT: slli a7, a2, 1
; CHECK-RV32-NEXT: vmv1r.v v8, v0
; CHECK-RV32-NEXT: mv a6, a3
; CHECK-RV32-NEXT: mv a5, a3
; CHECK-RV32-NEXT: bltu a3, a7, .LBB51_2
; CHECK-RV32-NEXT: # %bb.1:
; CHECK-RV32-NEXT: mv a6, a7
; CHECK-RV32-NEXT: mv a5, a7
; CHECK-RV32-NEXT: .LBB51_2:
; CHECK-RV32-NEXT: sub a5, a6, a2
; CHECK-RV32-NEXT: sltu t0, a6, a5
; CHECK-RV32-NEXT: sub a6, a5, a2
; CHECK-RV32-NEXT: sltu t0, a5, a6
; CHECK-RV32-NEXT: addi t0, t0, -1
; CHECK-RV32-NEXT: and t0, t0, a5
; CHECK-RV32-NEXT: mv a5, a6
; CHECK-RV32-NEXT: bltu a6, a2, .LBB51_4
; CHECK-RV32-NEXT: and t0, t0, a6
; CHECK-RV32-NEXT: srli a6, a2, 3
; CHECK-RV32-NEXT: vsetvli t1, zero, e8, mf4, ta, ma
; CHECK-RV32-NEXT: vslidedown.vx v0, v8, a6
; CHECK-RV32-NEXT: mv a6, a5
; CHECK-RV32-NEXT: bltu a5, a2, .LBB51_4
; CHECK-RV32-NEXT: # %bb.3:
; CHECK-RV32-NEXT: mv a5, a2
; CHECK-RV32-NEXT: mv a6, a2
; CHECK-RV32-NEXT: .LBB51_4:
; CHECK-RV32-NEXT: mul t1, a5, a1
; CHECK-RV32-NEXT: mul t1, a6, a1
; CHECK-RV32-NEXT: add t1, a0, t1
; CHECK-RV32-NEXT: srli t2, a2, 3
; CHECK-RV32-NEXT: vsetvli t3, zero, e8, mf4, ta, ma
; CHECK-RV32-NEXT: vslidedown.vx v0, v8, t2
; CHECK-RV32-NEXT: vsetvli zero, t0, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v16, (t1), a1, v0.t
; CHECK-RV32-NEXT: sub a7, a3, a7
Expand All @@ -733,14 +731,14 @@ define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV32-NEXT: # %bb.5:
; CHECK-RV32-NEXT: mv a3, a2
; CHECK-RV32-NEXT: .LBB51_6:
; CHECK-RV32-NEXT: mul a6, a6, a1
; CHECK-RV32-NEXT: add a6, a0, a6
; CHECK-RV32-NEXT: srli a2, a2, 2
; CHECK-RV32-NEXT: vsetvli a7, zero, e8, mf2, ta, ma
; CHECK-RV32-NEXT: vslidedown.vx v0, v8, a2
; CHECK-RV32-NEXT: mul a2, a5, a1
; CHECK-RV32-NEXT: add a2, a0, a2
; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v24, (a6), a1, v0.t
; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v24, (a2), a1, v0.t
; CHECK-RV32-NEXT: vsetvli zero, a6, e64, m8, ta, ma
; CHECK-RV32-NEXT: vmv1r.v v0, v8
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: vs1r.v v24, (a4)
@@ -751,25 +749,25 @@ define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV64-NEXT: csrr a4, vlenb
; CHECK-RV64-NEXT: slli a7, a4, 1
; CHECK-RV64-NEXT: vmv1r.v v8, v0
; CHECK-RV64-NEXT: mv a6, a2
; CHECK-RV64-NEXT: mv a5, a2
; CHECK-RV64-NEXT: bltu a2, a7, .LBB51_2
; CHECK-RV64-NEXT: # %bb.1:
; CHECK-RV64-NEXT: mv a6, a7
; CHECK-RV64-NEXT: mv a5, a7
; CHECK-RV64-NEXT: .LBB51_2:
; CHECK-RV64-NEXT: sub a5, a6, a4
; CHECK-RV64-NEXT: sltu t0, a6, a5
; CHECK-RV64-NEXT: sub a6, a5, a4
; CHECK-RV64-NEXT: sltu t0, a5, a6
; CHECK-RV64-NEXT: addi t0, t0, -1
; CHECK-RV64-NEXT: and t0, t0, a5
; CHECK-RV64-NEXT: mv a5, a6
; CHECK-RV64-NEXT: bltu a6, a4, .LBB51_4
; CHECK-RV64-NEXT: and t0, t0, a6
; CHECK-RV64-NEXT: srli a6, a4, 3
; CHECK-RV64-NEXT: vsetvli t1, zero, e8, mf4, ta, ma
; CHECK-RV64-NEXT: vslidedown.vx v0, v8, a6
; CHECK-RV64-NEXT: mv a6, a5
; CHECK-RV64-NEXT: bltu a5, a4, .LBB51_4
; CHECK-RV64-NEXT: # %bb.3:
; CHECK-RV64-NEXT: mv a5, a4
; CHECK-RV64-NEXT: mv a6, a4
; CHECK-RV64-NEXT: .LBB51_4:
; CHECK-RV64-NEXT: mul t1, a5, a1
; CHECK-RV64-NEXT: mul t1, a6, a1
; CHECK-RV64-NEXT: add t1, a0, t1
; CHECK-RV64-NEXT: srli t2, a4, 3
; CHECK-RV64-NEXT: vsetvli t3, zero, e8, mf4, ta, ma
; CHECK-RV64-NEXT: vslidedown.vx v0, v8, t2
; CHECK-RV64-NEXT: vsetvli zero, t0, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v16, (t1), a1, v0.t
; CHECK-RV64-NEXT: sub a7, a2, a7
Expand All @@ -780,14 +778,14 @@ define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV64-NEXT: # %bb.5:
; CHECK-RV64-NEXT: mv a2, a4
; CHECK-RV64-NEXT: .LBB51_6:
; CHECK-RV64-NEXT: mul a6, a6, a1
; CHECK-RV64-NEXT: add a6, a0, a6
; CHECK-RV64-NEXT: srli a4, a4, 2
; CHECK-RV64-NEXT: vsetvli a7, zero, e8, mf2, ta, ma
; CHECK-RV64-NEXT: vslidedown.vx v0, v8, a4
; CHECK-RV64-NEXT: mul a4, a5, a1
; CHECK-RV64-NEXT: add a4, a0, a4
; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v24, (a6), a1, v0.t
; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v24, (a4), a1, v0.t
; CHECK-RV64-NEXT: vsetvli zero, a6, e64, m8, ta, ma
; CHECK-RV64-NEXT: vmv1r.v v0, v8
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: vs1r.v v24, (a3)
14 changes: 7 additions & 7 deletions llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
@@ -507,11 +507,11 @@ define void @strided_store_nxv16f64(<vscale x 16 x double> %v, ptr %ptr, i32 sig
; CHECK-NEXT: sltu a2, a2, a5
; CHECK-NEXT: addi a2, a2, -1
; CHECK-NEXT: and a2, a2, a5
; CHECK-NEXT: mul a4, a4, a1
; CHECK-NEXT: add a0, a0, a4
; CHECK-NEXT: srli a3, a3, 3
; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a3
; CHECK-NEXT: mul a3, a4, a1
; CHECK-NEXT: add a0, a0, a3
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vsse64.v v16, (a0), a1, v0.t
; CHECK-NEXT: ret
@@ -580,11 +580,11 @@ define void @strided_store_nxv17f64(<vscale x 17 x double> %v, ptr %ptr, i32 sig
; CHECK-NEXT: sltu t0, a5, a0
; CHECK-NEXT: addi t0, t0, -1
; CHECK-NEXT: and a0, t0, a0
; CHECK-NEXT: mul a7, a7, a2
; CHECK-NEXT: add a7, a1, a7
; CHECK-NEXT: srli t0, a4, 3
; CHECK-NEXT: vsetvli t1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v24, t0
; CHECK-NEXT: mul a7, a7, a2
; CHECK-NEXT: add a7, a1, a7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: sub a0, a3, a6
; CHECK-NEXT: sltu a3, a3, a0
Expand All @@ -595,11 +595,11 @@ define void @strided_store_nxv17f64(<vscale x 17 x double> %v, ptr %ptr, i32 sig
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a0, a4
; CHECK-NEXT: .LBB43_6:
; CHECK-NEXT: mul a3, a5, a2
; CHECK-NEXT: add a1, a1, a3
; CHECK-NEXT: srli a4, a4, 2
; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v24, a4
; CHECK-NEXT: mul a3, a5, a2
; CHECK-NEXT: add a1, a1, a3
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
117 changes: 53 additions & 64 deletions llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
@@ -249,48 +249,35 @@ define <vscale x 32 x half> @vfsgnj_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
;
; ZVFHMIN-LABEL: vfsgnj_vv_nxv32f16:
; ZVFHMIN: # %bb.0:
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFHMIN-NEXT: vmv1r.v v1, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
; ZVFHMIN-NEXT: sltu a4, a0, a3
; ZVFHMIN-NEXT: addi a4, a4, -1
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsgnj.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: bltu a0, a1, .LBB10_2
; ZVFHMIN-NEXT: slli a2, a1, 1
; ZVFHMIN-NEXT: mv a3, a0
; ZVFHMIN-NEXT: bltu a0, a2, .LBB10_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: mv a3, a2
; ZVFHMIN-NEXT: .LBB10_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
; ZVFHMIN-NEXT: vmv4r.v v4, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsgnj.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: sub a2, a0, a2
; ZVFHMIN-NEXT: sltu a0, a0, a2
; ZVFHMIN-NEXT: addi a0, a0, -1
; ZVFHMIN-NEXT: and a0, a0, a2
; ZVFHMIN-NEXT: srli a1, a1, 2
; ZVFHMIN-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a1
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v1
; ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.copysign.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x half> %v
@@ -308,43 +295,45 @@ define <vscale x 32 x half> @vfsgnj_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: slli a1, a1, 1
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: slli a2, a1, 1
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
; ZVFHMIN-NEXT: sltu a4, a0, a3
; ZVFHMIN-NEXT: addi a4, a4, -1
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsgnj.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: bltu a0, a1, .LBB11_2
; ZVFHMIN-NEXT: addi a3, sp, 16
; ZVFHMIN-NEXT: vs1r.v v24, (a3) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: mv a3, a0
; ZVFHMIN-NEXT: bltu a0, a2, .LBB11_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: mv a3, a2
; ZVFHMIN-NEXT: .LBB11_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsgnj.vv v24, v24, v0
; ZVFHMIN-NEXT: vsetvli a3, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT: sub a2, a0, a2
; ZVFHMIN-NEXT: sltu a0, a0, a2
; ZVFHMIN-NEXT: addi a0, a0, -1
; ZVFHMIN-NEXT: and a0, a0, a2
; ZVFHMIN-NEXT: srli a1, a1, 2
; ZVFHMIN-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vl1r.v v16, (a2) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vslidedown.vx v0, v16, a1
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16
; ZVFHMIN-NEXT: vfsgnj.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: slli a0, a0, 1
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret