24 changes: 11 additions & 13 deletions llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
@@ -648,7 +648,7 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; ZVFHMIN-NEXT: vmset.m v1
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -657,7 +657,7 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v1, a2
; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
@@ -676,8 +676,7 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v1
; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
@@ -754,7 +753,7 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v16
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; ZVFHMIN-NEXT: vmset.m v1
; ZVFHMIN-NEXT: vmset.m v16
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
Expand All @@ -763,22 +762,21 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v1, a2
; ZVFHMIN-NEXT: vslidedown.vx v0, v16, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vfadd.vv v24, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT: bltu a0, a1, .LBB25_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB25_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v1
; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: ret
24 changes: 11 additions & 13 deletions llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
@@ -592,7 +592,7 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; ZVFHMIN-NEXT: vmset.m v1
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -601,7 +601,7 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v1, a2
; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
@@ -620,8 +620,7 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v1
; ZVFHMIN-NEXT: vfdiv.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vfdiv.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
@@ -698,7 +697,7 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v16
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; ZVFHMIN-NEXT: vmset.m v1
; ZVFHMIN-NEXT: vmset.m v16
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
Expand All @@ -707,22 +706,21 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v1, a2
; ZVFHMIN-NEXT: vslidedown.vx v0, v16, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vfdiv.vv v24, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT: bltu a0, a1, .LBB23_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB23_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v1
; ZVFHMIN-NEXT: vfdiv.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vfdiv.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: ret
7 changes: 3 additions & 4 deletions llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
@@ -312,7 +312,7 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; ZVFHMIN-NEXT: vmset.m v1
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -321,7 +321,7 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v1, a2
; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
@@ -340,8 +340,7 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v1
; ZVFHMIN-NEXT: vfmax.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vfmax.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
7 changes: 3 additions & 4 deletions llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
@@ -312,7 +312,7 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; ZVFHMIN-NEXT: vmset.m v1
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -321,7 +321,7 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v1, a2
; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
@@ -340,8 +340,7 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v1
; ZVFHMIN-NEXT: vfmin.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vfmin.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
24 changes: 11 additions & 13 deletions llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
@@ -592,7 +592,7 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; ZVFHMIN-NEXT: vmset.m v1
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -601,7 +601,7 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v1, a2
; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
@@ -620,8 +620,7 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v1
; ZVFHMIN-NEXT: vfmul.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vfmul.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
@@ -698,7 +697,7 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v16
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; ZVFHMIN-NEXT: vmset.m v1
; ZVFHMIN-NEXT: vmset.m v16
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
Expand All @@ -707,22 +706,21 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v1, a2
; ZVFHMIN-NEXT: vslidedown.vx v0, v16, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vfmul.vv v24, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT: bltu a0, a1, .LBB23_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB23_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v1
; ZVFHMIN-NEXT: vfmul.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vfmul.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: ret
11 changes: 5 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
@@ -291,19 +291,18 @@ define <vscale x 32 x half> @vfneg_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v16, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfneg.v v24, v24, v0.t
; ZVFHMIN-NEXT: vfneg.v v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: bltu a0, a1, .LBB11_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB11_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
; ZVFHMIN-NEXT: vfneg.v v16, v24, v0.t
; ZVFHMIN-NEXT: vfneg.v v16, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: ret
11 changes: 5 additions & 6 deletions llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
@@ -291,19 +291,18 @@ define <vscale x 32 x half> @vfsqrt_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v16, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsqrt.v v24, v24, v0.t
; ZVFHMIN-NEXT: vfsqrt.v v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: bltu a0, a1, .LBB11_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB11_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
; ZVFHMIN-NEXT: vfsqrt.v v16, v24, v0.t
; ZVFHMIN-NEXT: vfsqrt.v v16, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: ret
24 changes: 11 additions & 13 deletions llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
@@ -592,7 +592,7 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; ZVFHMIN-NEXT: vmset.m v1
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -601,7 +601,7 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v1, a2
; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
@@ -620,8 +620,7 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v1
; ZVFHMIN-NEXT: vfsub.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vfsub.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
@@ -698,7 +697,7 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v16
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; ZVFHMIN-NEXT: vmset.m v1
; ZVFHMIN-NEXT: vmset.m v16
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
Expand All @@ -707,22 +706,21 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v1, a2
; ZVFHMIN-NEXT: vslidedown.vx v0, v16, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vfsub.vv v24, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT: bltu a0, a1, .LBB23_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB23_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v1
; ZVFHMIN-NEXT: vfsub.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vfsub.vv v16, v24, v16
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: ret