36 changes: 18 additions & 18 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
@@ -1464,8 +1464,8 @@ define <8 x i1> @icmp_eq_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroex
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmseq.vv v12, v8, v16, v0.t
@@ -1492,8 +1492,8 @@ define <8 x i1> @icmp_eq_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 z
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmseq.vv v12, v16, v8, v0.t
@@ -1557,8 +1557,8 @@ define <8 x i1> @icmp_ne_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroex
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmsne.vv v12, v8, v16, v0.t
@@ -1585,8 +1585,8 @@ define <8 x i1> @icmp_ne_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 z
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmsne.vv v12, v16, v8, v0.t
@@ -1650,8 +1650,8 @@ define <8 x i1> @icmp_ugt_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroe
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmsltu.vv v12, v16, v8, v0.t
@@ -1678,8 +1678,8 @@ define <8 x i1> @icmp_ugt_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmsltu.vv v12, v8, v16, v0.t
@@ -1743,8 +1743,8 @@ define <8 x i1> @icmp_uge_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroe
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmsleu.vv v12, v16, v8, v0.t
@@ -1773,8 +1773,8 @@ define <8 x i1> @icmp_uge_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmsleu.vv v12, v8, v16, v0.t
@@ -1838,8 +1838,8 @@ define <8 x i1> @icmp_ult_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroe
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmsltu.vv v12, v8, v16, v0.t
@@ -1866,8 +1866,8 @@ define <8 x i1> @icmp_ult_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmsltu.vv v12, v16, v8, v0.t
@@ -1931,8 +1931,8 @@ define <8 x i1> @icmp_sgt_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroe
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmslt.vv v12, v16, v8, v0.t
@@ -1959,8 +1959,8 @@ define <8 x i1> @icmp_sgt_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmslt.vv v12, v8, v16, v0.t
@@ -2024,8 +2024,8 @@ define <8 x i1> @icmp_sge_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroe
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmsle.vv v12, v16, v8, v0.t
@@ -2054,8 +2054,8 @@ define <8 x i1> @icmp_sge_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmsle.vv v12, v8, v16, v0.t
@@ -2119,8 +2119,8 @@ define <8 x i1> @icmp_slt_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroe
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmslt.vv v12, v8, v16, v0.t
@@ -2147,8 +2147,8 @@ define <8 x i1> @icmp_slt_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmslt.vv v12, v16, v8, v0.t
@@ -2212,8 +2212,8 @@ define <8 x i1> @icmp_sle_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroe
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmsle.vv v12, v8, v16, v0.t
@@ -2240,8 +2240,8 @@ define <8 x i1> @icmp_sle_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmsle.vv v12, v16, v8, v0.t
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
@@ -1140,8 +1140,8 @@ define <2 x i64> @vadd_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
@@ -1166,8 +1166,8 @@ define <2 x i64> @vadd_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vadd.vv v8, v8, v9
@@ -1244,8 +1244,8 @@ define <4 x i64> @vadd_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vadd.vv v8, v8, v10, v0.t
@@ -1270,8 +1270,8 @@ define <4 x i64> @vadd_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vadd.vv v8, v8, v10
@@ -1348,8 +1348,8 @@ define <8 x i64> @vadd_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vadd.vv v8, v8, v12, v0.t
@@ -1374,8 +1374,8 @@ define <8 x i64> @vadd_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vadd.vv v8, v8, v12
@@ -1452,8 +1452,8 @@ define <16 x i64> @vadd_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zero
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
@@ -1478,8 +1478,8 @@ define <16 x i64> @vadd_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vadd.vv v8, v8, v16
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll
@@ -985,8 +985,8 @@ define <2 x i64> @vand_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vand.vv v8, v8, v9, v0.t
@@ -1011,8 +1011,8 @@ define <2 x i64> @vand_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vand.vv v8, v8, v9
@@ -1089,8 +1089,8 @@ define <4 x i64> @vand_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vand.vv v8, v8, v10, v0.t
@@ -1115,8 +1115,8 @@ define <4 x i64> @vand_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vand.vv v8, v8, v10
@@ -1193,8 +1193,8 @@ define <8 x i64> @vand_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vand.vv v8, v8, v12, v0.t
@@ -1219,8 +1219,8 @@ define <8 x i64> @vand_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vand.vv v8, v8, v12
@@ -1405,8 +1405,8 @@ define <16 x i64> @vand_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zero
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
@@ -1431,8 +1431,8 @@ define <16 x i64> @vand_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vand.vv v8, v8, v16
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll
@@ -664,8 +664,8 @@ define <2 x i64> @vdiv_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vdiv.vv v8, v8, v9, v0.t
@@ -690,8 +690,8 @@ define <2 x i64> @vdiv_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vdiv.vv v8, v8, v9
@@ -742,8 +742,8 @@ define <4 x i64> @vdiv_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vdiv.vv v8, v8, v10, v0.t
@@ -768,8 +768,8 @@ define <4 x i64> @vdiv_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vdiv.vv v8, v8, v10
@@ -820,8 +820,8 @@ define <8 x i64> @vdiv_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vdiv.vv v8, v8, v12, v0.t
@@ -846,8 +846,8 @@ define <8 x i64> @vdiv_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vdiv.vv v8, v8, v12
@@ -898,8 +898,8 @@ define <16 x i64> @vdiv_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zero
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vdiv.vv v8, v8, v16, v0.t
@@ -924,8 +924,8 @@ define <16 x i64> @vdiv_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vdiv.vv v8, v8, v16
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll
@@ -663,8 +663,8 @@ define <2 x i64> @vdivu_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vdivu.vv v8, v8, v9, v0.t
@@ -689,8 +689,8 @@ define <2 x i64> @vdivu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vdivu.vv v8, v8, v9
@@ -741,8 +741,8 @@ define <4 x i64> @vdivu_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vdivu.vv v8, v8, v10, v0.t
@@ -767,8 +767,8 @@ define <4 x i64> @vdivu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vdivu.vv v8, v8, v10
@@ -819,8 +819,8 @@ define <8 x i64> @vdivu_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vdivu.vv v8, v8, v12, v0.t
@@ -845,8 +845,8 @@ define <8 x i64> @vdivu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vdivu.vv v8, v8, v12
@@ -897,8 +897,8 @@ define <16 x i64> @vdivu_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zer
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vdivu.vv v8, v8, v16, v0.t
@@ -923,8 +923,8 @@ define <16 x i64> @vdivu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vdivu.vv v8, v8, v16
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll
@@ -709,8 +709,8 @@ define <2 x i64> @vmul_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vmul.vv v8, v8, v9, v0.t
@@ -735,8 +735,8 @@ define <2 x i64> @vmul_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vmul.vv v8, v8, v9
@@ -787,8 +787,8 @@ define <4 x i64> @vmul_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vmul.vv v8, v8, v10, v0.t
@@ -813,8 +813,8 @@ define <4 x i64> @vmul_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vmul.vv v8, v8, v10
@@ -865,8 +865,8 @@ define <8 x i64> @vmul_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vmul.vv v8, v8, v12, v0.t
@@ -891,8 +891,8 @@ define <8 x i64> @vmul_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vmul.vv v8, v8, v12
@@ -943,8 +943,8 @@ define <16 x i64> @vmul_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zero
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
@@ -969,8 +969,8 @@ define <16 x i64> @vmul_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vmul.vv v8, v8, v16
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll
@@ -1047,8 +1047,8 @@ define <2 x i64> @vor_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vor.vv v8, v8, v9, v0.t
@@ -1073,8 +1073,8 @@ define <2 x i64> @vor_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl)
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vor.vv v8, v8, v9
@@ -1151,8 +1151,8 @@ define <4 x i64> @vor_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vor.vv v8, v8, v10, v0.t
@@ -1177,8 +1177,8 @@ define <4 x i64> @vor_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl)
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vor.vv v8, v8, v10
@@ -1255,8 +1255,8 @@ define <8 x i64> @vor_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vor.vv v8, v8, v12, v0.t
@@ -1281,8 +1281,8 @@ define <8 x i64> @vor_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl)
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vor.vv v8, v8, v12
@@ -1359,8 +1359,8 @@ define <16 x i64> @vor_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroe
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
@@ -1385,8 +1385,8 @@ define <16 x i64> @vor_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %e
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vor.vv v8, v8, v16
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
@@ -570,8 +570,8 @@ define <2 x i64> @vpmerge_vx_v2i64(i64 %a, <2 x i64> %vb, <2 x i1> %m, i32 zeroe
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu
; RV32-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -621,8 +621,8 @@ define <4 x i64> @vpmerge_vx_v4i64(i64 %a, <4 x i64> %vb, <4 x i1> %m, i32 zeroe
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, tu, mu
; RV32-NEXT: vmerge.vvm v8, v8, v10, v0
@@ -672,8 +672,8 @@ define <8 x i64> @vpmerge_vx_v8i64(i64 %a, <8 x i64> %vb, <8 x i1> %m, i32 zeroe
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, tu, mu
; RV32-NEXT: vmerge.vvm v8, v8, v12, v0
@@ -723,8 +723,8 @@ define <16 x i64> @vpmerge_vx_v16i64(i64 %a, <16 x i64> %vb, <16 x i1> %m, i32 z
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, tu, mu
; RV32-NEXT: vmerge.vvm v8, v8, v16, v0
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll
@@ -664,8 +664,8 @@ define <2 x i64> @vrem_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vrem.vv v8, v8, v9, v0.t
@@ -690,8 +690,8 @@ define <2 x i64> @vrem_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vrem.vv v8, v8, v9
@@ -742,8 +742,8 @@ define <4 x i64> @vrem_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vrem.vv v8, v8, v10, v0.t
@@ -768,8 +768,8 @@ define <4 x i64> @vrem_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vrem.vv v8, v8, v10
@@ -820,8 +820,8 @@ define <8 x i64> @vrem_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vrem.vv v8, v8, v12, v0.t
@@ -846,8 +846,8 @@ define <8 x i64> @vrem_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vrem.vv v8, v8, v12
@@ -898,8 +898,8 @@ define <16 x i64> @vrem_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zero
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vrem.vv v8, v8, v16, v0.t
@@ -924,8 +924,8 @@ define <16 x i64> @vrem_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vrem.vv v8, v8, v16
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
@@ -663,8 +663,8 @@ define <2 x i64> @vremu_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vremu.vv v8, v8, v9, v0.t
@@ -689,8 +689,8 @@ define <2 x i64> @vremu_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vremu.vv v8, v8, v9
@@ -741,8 +741,8 @@ define <4 x i64> @vremu_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vremu.vv v8, v8, v10, v0.t
@@ -767,8 +767,8 @@ define <4 x i64> @vremu_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vremu.vv v8, v8, v10
@@ -819,8 +819,8 @@ define <8 x i64> @vremu_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vremu.vv v8, v8, v12, v0.t
@@ -845,8 +845,8 @@ define <8 x i64> @vremu_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vremu.vv v8, v8, v12
@@ -897,8 +897,8 @@ define <16 x i64> @vremu_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zer
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vremu.vv v8, v8, v16, v0.t
@@ -923,8 +923,8 @@ define <16 x i64> @vremu_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vremu.vv v8, v8, v16
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrsub-vp.ll
@@ -661,8 +661,8 @@ define <2 x i64> @vrsub_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vsub.vv v8, v9, v8, v0.t
@@ -687,8 +687,8 @@ define <2 x i64> @vrsub_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %ev
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vsub.vv v8, v9, v8
@@ -743,8 +743,8 @@ define <4 x i64> @vrsub_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vsub.vv v8, v10, v8, v0.t
@@ -769,8 +769,8 @@ define <4 x i64> @vrsub_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %ev
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vsub.vv v8, v10, v8
@@ -825,8 +825,8 @@ define <8 x i64> @vrsub_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vsub.vv v8, v12, v8, v0.t
@@ -851,8 +851,8 @@ define <8 x i64> @vrsub_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %ev
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vsub.vv v8, v12, v8
@@ -907,8 +907,8 @@ define <16 x i64> @vrsub_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zer
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vsub.vv v8, v16, v8, v0.t
@@ -933,8 +933,8 @@ define <16 x i64> @vrsub_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vsub.vv v8, v16, v8
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd.ll
@@ -455,8 +455,8 @@ define <2 x i64> @sadd_v2i64_vx(<2 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsadd.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
@@ -504,8 +504,8 @@ define <4 x i64> @sadd_v4i64_vx(<4 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsadd.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
@@ -553,8 +553,8 @@ define <8 x i64> @sadd_v8i64_vx(<8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsadd.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
@@ -602,8 +602,8 @@ define <16 x i64> @sadd_v16i64_vx(<16 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsadd.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu.ll
@@ -455,8 +455,8 @@ define <2 x i64> @uadd_v2i64_vx(<2 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsaddu.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
@@ -504,8 +504,8 @@ define <4 x i64> @uadd_v4i64_vx(<4 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsaddu.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
@@ -553,8 +553,8 @@ define <8 x i64> @uadd_v8i64_vx(<8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsaddu.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
@@ -602,8 +602,8 @@ define <16 x i64> @uadd_v16i64_vx(<16 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsaddu.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub.ll
@@ -467,8 +467,8 @@ define <2 x i64> @ssub_v2i64_vx(<2 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vssub.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
@@ -517,8 +517,8 @@ define <4 x i64> @ssub_v4i64_vx(<4 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vssub.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
@@ -567,8 +567,8 @@ define <8 x i64> @ssub_v8i64_vx(<8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vssub.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
@@ -617,8 +617,8 @@ define <16 x i64> @ssub_v16i64_vx(<16 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vssub.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu.ll
@@ -467,8 +467,8 @@ define <2 x i64> @usub_v2i64_vx(<2 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vssubu.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
@@ -517,8 +517,8 @@ define <4 x i64> @usub_v4i64_vx(<4 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vssubu.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
@@ -567,8 +567,8 @@ define <8 x i64> @usub_v8i64_vx(<8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vssubu.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
@@ -617,8 +617,8 @@ define <16 x i64> @usub_v16i64_vx(<16 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vssubu.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll
@@ -697,8 +697,8 @@ define <2 x i64> @vsub_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
@@ -723,8 +723,8 @@ define <2 x i64> @vsub_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vsub.vv v8, v8, v9
@@ -775,8 +775,8 @@ define <4 x i64> @vsub_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vsub.vv v8, v8, v10, v0.t
@@ -801,8 +801,8 @@ define <4 x i64> @vsub_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vsub.vv v8, v8, v10
@@ -853,8 +853,8 @@ define <8 x i64> @vsub_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vsub.vv v8, v8, v12, v0.t
@@ -879,8 +879,8 @@ define <8 x i64> @vsub_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vsub.vv v8, v8, v12
@@ -931,8 +931,8 @@ define <16 x i64> @vsub_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zero
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
@@ -957,8 +957,8 @@ define <16 x i64> @vsub_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vsub.vv v8, v8, v16
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll
@@ -1385,8 +1385,8 @@ define <2 x i64> @vxor_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vxor.vv v8, v8, v9, v0.t
@@ -1411,8 +1411,8 @@ define <2 x i64> @vxor_vx_v2i64_unmasked(<2 x i64> %va, i64 %b, i32 zeroext %evl
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vxor.vv v8, v8, v9
@@ -1515,8 +1515,8 @@ define <4 x i64> @vxor_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vxor.vv v8, v8, v10, v0.t
@@ -1541,8 +1541,8 @@ define <4 x i64> @vxor_vx_v4i64_unmasked(<4 x i64> %va, i64 %b, i32 zeroext %evl
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vxor.vv v8, v8, v10
@@ -1645,8 +1645,8 @@ define <8 x i64> @vxor_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vxor.vv v8, v8, v12, v0.t
@@ -1671,8 +1671,8 @@ define <8 x i64> @vxor_vx_v8i64_unmasked(<8 x i64> %va, i64 %b, i32 zeroext %evl
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vxor.vv v8, v8, v12
@@ -1775,8 +1775,8 @@ define <16 x i64> @vxor_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zero
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vxor.vv v8, v8, v16, v0.t
@@ -1801,8 +1801,8 @@ define <16 x i64> @vxor_vx_v16i64_unmasked(<16 x i64> %va, i64 %b, i32 zeroext %
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vxor.vv v8, v8, v16
3 changes: 2 additions & 1 deletion llvm/test/CodeGen/RISCV/rvv/frameindex-addr.ll
@@ -16,7 +16,8 @@ define i64 @test(<vscale x 1 x i64> %0) nounwind {
; CHECK-NEXT: liveins: $v8
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:vr = COPY $v8
; CHECK-NEXT: PseudoVSE64_V_M1 [[COPY]], %stack.0.a, 1, 6 /* e64 */
; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI %stack.0.a, 0
; CHECK-NEXT: PseudoVSE64_V_M1 [[COPY]], killed [[ADDI]], 1, 6 /* e64 */
; CHECK-NEXT: [[LD:%[0-9]+]]:gpr = LD %stack.0.a, 0 :: (dereferenceable load (s64) from %ir.a)
; CHECK-NEXT: $x10 = COPY [[LD]]
; CHECK-NEXT: PseudoRET implicit $x10
68 changes: 37 additions & 31 deletions llvm/test/CodeGen/RISCV/rvv/localvar.ll
@@ -10,10 +10,10 @@ define void @local_var_mf8() {
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: slli a0, a0, 1
; RV64IV-NEXT: sub sp, sp, a0
; RV64IV-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: add a0, sp, a0
; RV64IV-NEXT: addi a0, a0, 16
; RV64IV-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
; RV64IV-NEXT: vle8.v v8, (a0)
; RV64IV-NEXT: addi a0, sp, 16
; RV64IV-NEXT: vle8.v v8, (a0)
@@ -153,24 +153,24 @@ define void @local_var_m8() {
define void @local_var_m2_mix_local_scalar() {
; RV64IV-LABEL: local_var_m2_mix_local_scalar:
; RV64IV: # %bb.0:
; RV64IV-NEXT: addi sp, sp, -32
; RV64IV-NEXT: .cfi_def_cfa_offset 32
; RV64IV-NEXT: addi sp, sp, -16
; RV64IV-NEXT: .cfi_def_cfa_offset 16
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: slli a0, a0, 2
; RV64IV-NEXT: sub sp, sp, a0
; RV64IV-NEXT: lw a0, 28(sp)
; RV64IV-NEXT: lw a0, 12(sp)
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: slli a0, a0, 1
; RV64IV-NEXT: add a0, sp, a0
; RV64IV-NEXT: addi a0, a0, 32
; RV64IV-NEXT: addi a0, a0, 16
; RV64IV-NEXT: vl2r.v v8, (a0)
; RV64IV-NEXT: addi a0, sp, 32
; RV64IV-NEXT: addi a0, sp, 16
; RV64IV-NEXT: vl2r.v v8, (a0)
; RV64IV-NEXT: lw a0, 24(sp)
; RV64IV-NEXT: lw a0, 8(sp)
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: slli a0, a0, 2
; RV64IV-NEXT: add sp, sp, a0
; RV64IV-NEXT: addi sp, sp, 32
; RV64IV-NEXT: addi sp, sp, 16
; RV64IV-NEXT: ret
%local_scalar0 = alloca i32
%local0 = alloca <vscale x 16 x i8>
@@ -190,8 +190,10 @@ define void @local_var_m2_with_varsize_object(i64 %n) {
; RV64IV-NEXT: .cfi_def_cfa_offset 32
; RV64IV-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64IV-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64IV-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64IV-NEXT: .cfi_offset ra, -8
; RV64IV-NEXT: .cfi_offset s0, -16
; RV64IV-NEXT: .cfi_offset s1, -24
; RV64IV-NEXT: addi s0, sp, 32
; RV64IV-NEXT: .cfi_def_cfa s0, 0
; RV64IV-NEXT: csrr a1, vlenb
Expand All @@ -205,12 +207,12 @@ define void @local_var_m2_with_varsize_object(i64 %n) {
; RV64IV-NEXT: slli a1, a1, 1
; RV64IV-NEXT: sub a1, s0, a1
; RV64IV-NEXT: addi a1, a1, -32
; RV64IV-NEXT: csrr a2, vlenb
; RV64IV-NEXT: slli a2, a2, 1
; RV64IV-NEXT: sub a2, s0, a2
; RV64IV-NEXT: addi s1, a2, -32
; RV64IV-NEXT: call notdead@plt
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: slli a0, a0, 1
; RV64IV-NEXT: sub a0, s0, a0
; RV64IV-NEXT: addi a0, a0, -32
; RV64IV-NEXT: vl2r.v v8, (a0)
; RV64IV-NEXT: vl2r.v v8, (s1)
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: slli a0, a0, 2
; RV64IV-NEXT: sub a0, s0, a0
@@ -219,6 +221,7 @@ define void @local_var_m2_with_varsize_object(i64 %n) {
; RV64IV-NEXT: addi sp, s0, -32
; RV64IV-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64IV-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64IV-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64IV-NEXT: addi sp, sp, 32
; RV64IV-NEXT: ret
%1 = alloca i8, i64 %n
@@ -233,15 +236,17 @@ define void @local_var_m2_with_varsize_object(i64 %n) {
define void @local_var_m2_with_bp(i64 %n) {
; RV64IV-LABEL: local_var_m2_with_bp:
; RV64IV: # %bb.0:
; RV64IV-NEXT: addi sp, sp, -272
; RV64IV-NEXT: .cfi_def_cfa_offset 272
; RV64IV-NEXT: sd ra, 264(sp) # 8-byte Folded Spill
; RV64IV-NEXT: sd s0, 256(sp) # 8-byte Folded Spill
; RV64IV-NEXT: sd s1, 248(sp) # 8-byte Folded Spill
; RV64IV-NEXT: addi sp, sp, -256
; RV64IV-NEXT: .cfi_def_cfa_offset 256
; RV64IV-NEXT: sd ra, 248(sp) # 8-byte Folded Spill
; RV64IV-NEXT: sd s0, 240(sp) # 8-byte Folded Spill
; RV64IV-NEXT: sd s1, 232(sp) # 8-byte Folded Spill
; RV64IV-NEXT: sd s2, 224(sp) # 8-byte Folded Spill
; RV64IV-NEXT: .cfi_offset ra, -8
; RV64IV-NEXT: .cfi_offset s0, -16
; RV64IV-NEXT: .cfi_offset s1, -24
; RV64IV-NEXT: addi s0, sp, 272
; RV64IV-NEXT: .cfi_offset s2, -32
; RV64IV-NEXT: addi s0, sp, 256
; RV64IV-NEXT: .cfi_def_cfa s0, 0
; RV64IV-NEXT: csrr a1, vlenb
; RV64IV-NEXT: slli a1, a1, 2
@@ -256,22 +261,23 @@ define void @local_var_m2_with_bp(i64 %n) {
; RV64IV-NEXT: csrr a2, vlenb
; RV64IV-NEXT: slli a2, a2, 1
; RV64IV-NEXT: add a2, s1, a2
; RV64IV-NEXT: addi a2, a2, 240
; RV64IV-NEXT: addi a2, a2, 224
; RV64IV-NEXT: csrr a3, vlenb
; RV64IV-NEXT: slli a3, a3, 1
; RV64IV-NEXT: add a3, s1, a3
; RV64IV-NEXT: addi s2, a3, 224
; RV64IV-NEXT: call notdead2@plt
; RV64IV-NEXT: lw a0, 124(s1)
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: slli a0, a0, 1
; RV64IV-NEXT: add a0, s1, a0
; RV64IV-NEXT: addi a0, a0, 240
; RV64IV-NEXT: vl2r.v v8, (a0)
; RV64IV-NEXT: addi a0, s1, 240
; RV64IV-NEXT: vl2r.v v8, (s2)
; RV64IV-NEXT: addi a0, s1, 224
; RV64IV-NEXT: vl2r.v v8, (a0)
; RV64IV-NEXT: lw a0, 120(s1)
; RV64IV-NEXT: addi sp, s0, -272
; RV64IV-NEXT: ld ra, 264(sp) # 8-byte Folded Reload
; RV64IV-NEXT: ld s0, 256(sp) # 8-byte Folded Reload
; RV64IV-NEXT: ld s1, 248(sp) # 8-byte Folded Reload
; RV64IV-NEXT: addi sp, sp, 272
; RV64IV-NEXT: addi sp, s0, -256
; RV64IV-NEXT: ld ra, 248(sp) # 8-byte Folded Reload
; RV64IV-NEXT: ld s0, 240(sp) # 8-byte Folded Reload
; RV64IV-NEXT: ld s1, 232(sp) # 8-byte Folded Reload
; RV64IV-NEXT: ld s2, 224(sp) # 8-byte Folded Reload
; RV64IV-NEXT: addi sp, sp, 256
; RV64IV-NEXT: ret
%1 = alloca i8, i64 %n
%2 = alloca i32, align 128
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
@@ -1383,8 +1383,8 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i64>
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; RV32-NEXT: vmseq.vv v0, v8, v9, v0.t
@@ -1420,8 +1420,8 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i64>
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; RV32-NEXT: vmsle.vv v0, v9, v8, v0.t
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/RISCV/rvv/memory-args.ll
@@ -55,8 +55,8 @@ define <vscale x 64 x i8> @caller() {
; RV64IV-NEXT: add a0, sp, a0
; RV64IV-NEXT: addi a0, a0, 64
; RV64IV-NEXT: vl8r.v v24, (a0)
; RV64IV-NEXT: addi a0, sp, 64
; RV64IV-NEXT: addi a1, sp, 64
; RV64IV-NEXT: addi a0, sp, 64
; RV64IV-NEXT: vs8r.v v24, (a1)
; RV64IV-NEXT: call callee@plt
; RV64IV-NEXT: addi sp, s0, -80
19 changes: 7 additions & 12 deletions llvm/test/CodeGen/RISCV/rvv/rvv-args-by-mem.ll
@@ -34,19 +34,14 @@ define <vscale x 16 x i32> @foo(i32 %0, i32 %1, i32 %2, i32 %3, i32 %4, i32 %5,
; CHECK-NEXT: andi sp, sp, -64
; CHECK-NEXT: mv s1, sp
; CHECK-NEXT: addi t0, s1, 64
; CHECK-NEXT: sd t0, 8(sp)
; CHECK-NEXT: csrr t0, vlenb
; CHECK-NEXT: slli t0, t0, 3
; CHECK-NEXT: add t0, s1, t0
; CHECK-NEXT: addi t0, t0, 64
; CHECK-NEXT: sd t0, 0(sp)
; CHECK-NEXT: addi t0, s1, 64
; CHECK-NEXT: vs8r.v v8, (t0)
; CHECK-NEXT: csrr t0, vlenb
; CHECK-NEXT: slli t0, t0, 3
; CHECK-NEXT: add t0, s1, t0
; CHECK-NEXT: addi t0, t0, 64
; CHECK-NEXT: vs8r.v v8, (t0)
; CHECK-NEXT: csrr t1, vlenb
; CHECK-NEXT: slli t1, t1, 3
; CHECK-NEXT: add t1, s1, t1
; CHECK-NEXT: addi t1, t1, 64
; CHECK-NEXT: vs8r.v v8, (t1)
; CHECK-NEXT: sd t0, 8(sp)
; CHECK-NEXT: sd t1, 0(sp)
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: call bar@plt
; CHECK-NEXT: addi sp, s0, -80
60 changes: 27 additions & 33 deletions llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll
@@ -5,34 +5,34 @@
define dso_local void @lots_args(i32 signext %x0, i32 signext %x1, <vscale x 16 x i32> %v0, i32 signext %x2, i32 signext %x3, i32 signext %x4, i32 signext %x5, i32 signext %x6, i32 %x7, i32 %x8, i32 %x9) #0 {
; CHECK-LABEL: lots_args:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: addi sp, sp, -80
; CHECK-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; CHECK-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
; CHECK-NEXT: addi s0, sp, 80
; CHECK-NEXT: addi sp, sp, -64
; CHECK-NEXT: sd ra, 56(sp) # 8-byte Folded Spill
; CHECK-NEXT: sd s0, 48(sp) # 8-byte Folded Spill
; CHECK-NEXT: addi s0, sp, 64
; CHECK-NEXT: csrr t0, vlenb
; CHECK-NEXT: slli t0, t0, 3
; CHECK-NEXT: sub sp, sp, t0
; CHECK-NEXT: ld t0, 8(s0)
; CHECK-NEXT: ld t1, 0(s0)
; CHECK-NEXT: sw a0, -36(s0)
; CHECK-NEXT: sw a1, -40(s0)
; CHECK-NEXT: sw a0, -28(s0)
; CHECK-NEXT: sw a1, -32(s0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: sub a0, s0, a0
; CHECK-NEXT: addi a0, a0, -80
; CHECK-NEXT: addi a0, a0, -64
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: sw a2, -44(s0)
; CHECK-NEXT: sw a3, -48(s0)
; CHECK-NEXT: sw a4, -52(s0)
; CHECK-NEXT: sw a5, -56(s0)
; CHECK-NEXT: sw a6, -60(s0)
; CHECK-NEXT: sw a7, -64(s0)
; CHECK-NEXT: sw t1, -68(s0)
; CHECK-NEXT: sw t0, -72(s0)
; CHECK-NEXT: addi sp, s0, -80
; CHECK-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 80
; CHECK-NEXT: sw a2, -36(s0)
; CHECK-NEXT: sw a3, -40(s0)
; CHECK-NEXT: sw a4, -44(s0)
; CHECK-NEXT: sw a5, -48(s0)
; CHECK-NEXT: sw a6, -52(s0)
; CHECK-NEXT: sw a7, -56(s0)
; CHECK-NEXT: sw t1, -60(s0)
; CHECK-NEXT: sw t0, -64(s0)
; CHECK-NEXT: addi sp, s0, -64
; CHECK-NEXT: ld ra, 56(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s0, 48(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 64
; CHECK-NEXT: ret
entry:
%x0.addr = alloca i32, align 4
@@ -66,6 +66,7 @@ define dso_local signext i32 @main() #0 {
; CHECK-NEXT: addi sp, sp, -112
; CHECK-NEXT: sd ra, 104(sp) # 8-byte Folded Spill
; CHECK-NEXT: sd s0, 96(sp) # 8-byte Folded Spill
; CHECK-NEXT: sd s1, 88(sp) # 8-byte Folded Spill
; CHECK-NEXT: addi s0, sp, 112
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -76,14 +77,14 @@ define dso_local signext i32 @main() #0 {
; CHECK-NEXT: vsetivli a0, 4, e32, m8, ta, mu
; CHECK-NEXT: sd a0, -64(s0)
; CHECK-NEXT: ld a0, -64(s0)
; CHECK-NEXT: addi a1, s0, -56
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT: addi a0, s0, -56
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: sub a0, s0, a0
; CHECK-NEXT: addi a0, a0, -112
; CHECK-NEXT: vs8r.v v8, (a0)
; CHECK-NEXT: addi s1, a0, -112
; CHECK-NEXT: vs8r.v v8, (s1)
; CHECK-NEXT: li a0, 1
; CHECK-NEXT: sw a0, -68(s0)
; CHECK-NEXT: sw a0, -72(s0)
@@ -97,11 +98,7 @@ define dso_local signext i32 @main() #0 {
; CHECK-NEXT: sw a0, -104(s0)
; CHECK-NEXT: lw a0, -68(s0)
; CHECK-NEXT: lw a1, -72(s0)
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 3
; CHECK-NEXT: sub a2, s0, a2
; CHECK-NEXT: addi a2, a2, -112
; CHECK-NEXT: vl8re32.v v8, (a2)
; CHECK-NEXT: vl8re32.v v8, (s1)
; CHECK-NEXT: lw a2, -76(s0)
; CHECK-NEXT: lw a3, -80(s0)
; CHECK-NEXT: lw a4, -84(s0)
@@ -117,11 +114,7 @@ define dso_local signext i32 @main() #0 {
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: lw a0, -68(s0)
; CHECK-NEXT: lw a1, -72(s0)
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 3
; CHECK-NEXT: sub a2, s0, a2
; CHECK-NEXT: addi a2, a2, -112
; CHECK-NEXT: vl8re32.v v8, (a2)
; CHECK-NEXT: vl8re32.v v8, (s1)
; CHECK-NEXT: lw a2, -76(s0)
; CHECK-NEXT: lw a3, -80(s0)
; CHECK-NEXT: lw a4, -84(s0)
@@ -139,6 +132,7 @@ define dso_local signext i32 @main() #0 {
; CHECK-NEXT: addi sp, s0, -112
; CHECK-NEXT: ld ra, 104(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s0, 96(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s1, 88(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 112
; CHECK-NEXT: ret
entry:
72 changes: 36 additions & 36 deletions llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll

Large diffs are not rendered by default.

40 changes: 20 additions & 20 deletions llvm/test/CodeGen/RISCV/rvv/setcc-integer.ll
@@ -2243,8 +2243,8 @@ define <vscale x 8 x i1> @icmp_eq_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmseq.vv v0, v8, v16
; RV32-NEXT: addi sp, sp, 16
@@ -2268,8 +2268,8 @@ define <vscale x 8 x i1> @icmp_eq_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmseq.vv v0, v16, v8
; RV32-NEXT: addi sp, sp, 16
@@ -2339,8 +2339,8 @@ define <vscale x 8 x i1> @icmp_ne_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmsne.vv v0, v8, v16
; RV32-NEXT: addi sp, sp, 16
@@ -2364,8 +2364,8 @@ define <vscale x 8 x i1> @icmp_ne_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmsne.vv v0, v16, v8
; RV32-NEXT: addi sp, sp, 16
@@ -2411,8 +2411,8 @@ define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmsltu.vv v0, v16, v8
; RV32-NEXT: addi sp, sp, 16
@@ -2436,8 +2436,8 @@ define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmsltu.vv v0, v8, v16
; RV32-NEXT: addi sp, sp, 16
@@ -2483,8 +2483,8 @@ define <vscale x 8 x i1> @icmp_uge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmsleu.vv v0, v16, v8
; RV32-NEXT: addi sp, sp, 16
@@ -2509,8 +2509,8 @@ define <vscale x 8 x i1> @icmp_uge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmsleu.vv v0, v8, v16
; RV32-NEXT: addi sp, sp, 16
@@ -2629,8 +2629,8 @@ define <vscale x 8 x i1> @icmp_ult_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmsltu.vv v0, v8, v16
; RV32-NEXT: addi sp, sp, 16
@@ -2654,8 +2654,8 @@ define <vscale x 8 x i1> @icmp_ult_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmsltu.vv v0, v16, v8
; RV32-NEXT: addi sp, sp, 16
@@ -2762,8 +2762,8 @@ define <vscale x 8 x i1> @icmp_ule_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmsleu.vv v0, v8, v16
; RV32-NEXT: addi sp, sp, 16
@@ -2787,8 +2787,8 @@ define <vscale x 8 x i1> @icmp_ule_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmsleu.vv v0, v16, v8
; RV32-NEXT: addi sp, sp, 16
@@ -2835,8 +2835,8 @@ define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmslt.vv v0, v16, v8
; RV32-NEXT: addi sp, sp, 16
@@ -2860,8 +2860,8 @@ define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmslt.vv v0, v8, v16
; RV32-NEXT: addi sp, sp, 16
@@ -2907,8 +2907,8 @@ define <vscale x 8 x i1> @icmp_sge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmsle.vv v0, v16, v8
; RV32-NEXT: addi sp, sp, 16
@@ -2933,8 +2933,8 @@ define <vscale x 8 x i1> @icmp_sge_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmsle.vv v0, v8, v16
; RV32-NEXT: addi sp, sp, 16
@@ -3029,8 +3029,8 @@ define <vscale x 8 x i1> @icmp_slt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmslt.vv v0, v8, v16
; RV32-NEXT: addi sp, sp, 16
@@ -3054,8 +3054,8 @@ define <vscale x 8 x i1> @icmp_slt_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmslt.vv v0, v16, v8
; RV32-NEXT: addi sp, sp, 16
@@ -3150,8 +3150,8 @@ define <vscale x 8 x i1> @icmp_sle_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmsle.vv v0, v8, v16
; RV32-NEXT: addi sp, sp, 16
@@ -3175,8 +3175,8 @@ define <vscale x 8 x i1> @icmp_sle_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vmsle.vv v0, v16, v8
; RV32-NEXT: addi sp, sp, 16
7 changes: 3 additions & 4 deletions llvm/test/CodeGen/RISCV/rvv/stepvector.ll
@@ -501,8 +501,8 @@ define <vscale x 8 x i64> @mul_bigimm_stepvector_nxv8i64() {
; RV32-NEXT: lui a0, 797989
; RV32-NEXT: addi a0, a0, -683
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v8, (a0), zero
; RV32-NEXT: vid.v v16
; RV32-NEXT: vmul.vv v8, v16, v8
@@ -552,8 +552,8 @@ define <vscale x 16 x i64> @stepvector_nxv16i64() {
; RV32-NEXT: sw zero, 12(sp)
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vid.v v8
; RV32-NEXT: vadd.vv v16, v8, v16
@@ -667,10 +667,9 @@ define <vscale x 16 x i64> @mul_bigimm_stepvector_nxv16i64() {
; RV32-NEXT: mulhu a0, a0, a2
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: sw a0, 12(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v8, (a0), zero
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vid.v v24
; RV32-NEXT: vmul.vv v8, v24, v8
10 changes: 5 additions & 5 deletions llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll
@@ -293,8 +293,8 @@ define <vscale x 1 x i64> @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmacc.vv v8, v9, v10
; RV32-NEXT: addi sp, sp, 16
@@ -328,8 +328,8 @@ define <vscale x 1 x i64> @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmadd.vv v8, v10, v9
; RV32-NEXT: addi sp, sp, 16
@@ -363,8 +363,8 @@ define <vscale x 1 x i64> @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64(<vscale x 1
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vnmsac.vv v8, v9, v10
; RV32-NEXT: addi sp, sp, 16
@@ -398,8 +398,8 @@ define <vscale x 1 x i64> @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64(<vscale x 1
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vnmsub.vv v8, v10, v9
; RV32-NEXT: addi sp, sp, 16
@@ -919,8 +919,8 @@ define <vscale x 1 x i64> @intrinsic_vmv.s.x_x_nxv1i64(i64 %0, iXLen %1) nounwin
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v8, (a0), zero
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
18 changes: 9 additions & 9 deletions llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
@@ -1151,8 +1151,8 @@ define <vscale x 1 x i64> @intrinsic_vrsub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu
; RV32-NEXT: vsub.vv v8, v10, v9
@@ -1186,8 +1186,8 @@ define <vscale x 1 x i64> @intrinsic_vsadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu
; RV32-NEXT: vsadd.vv v8, v9, v10
@@ -1287,8 +1287,8 @@ define <vscale x 1 x i64> @intrinsic_vsmul_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu
; RV32-NEXT: vsmul.vv v8, v9, v10
@@ -1453,8 +1453,8 @@ define <vscale x 1 x i64> @intrinsic_vssub_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu
; RV32-NEXT: vssub.vv v8, v9, v10
@@ -1488,8 +1488,8 @@ define <vscale x 1 x i64> @intrinsic_vssubu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu
; RV32-NEXT: vssubu.vv v8, v9, v10
@@ -2477,8 +2477,8 @@ define <vscale x 8 x i64> @intrinsic_vmerge_vxm_nxv8i64_nxv8i64_i64(<vscale x 8
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu
; RV32-NEXT: vmerge.vvm v8, v16, v24, v0
@@ -2509,9 +2509,9 @@ define <vscale x 8 x i64> @intrinsic_vmerge_vim_nxv8i64_nxv8i64_i64(<vscale x 8
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: li a1, -1
; RV32-NEXT: sw a1, 8(sp)
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, zero, e64, m8, tu, mu
; RV32-NEXT: vmerge.vvm v8, v16, v24, v0
; RV32-NEXT: addi sp, sp, 16
@@ -2658,8 +2658,8 @@ define <vscale x 1 x i64> @intrinsic_vmv.v.x_x_nxv1i64(<vscale x 1 x i64> %0, i6
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, tu, mu
; RV32-NEXT: vlse64.v v8, (a0), zero
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vaadd.ll
@@ -1859,8 +1859,8 @@ define <vscale x 1 x i64> @intrinsic_vaadd_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vaadd.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
@@ -1894,8 +1894,8 @@ define <vscale x 1 x i64> @intrinsic_vaadd_mask_vx_nxv1i64_nxv1i64_i64(<vscale x
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vaadd.vv v8, v9, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
@@ -1929,8 +1929,8 @@ define <vscale x 2 x i64> @intrinsic_vaadd_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vaadd.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
@@ -1964,8 +1964,8 @@ define <vscale x 2 x i64> @intrinsic_vaadd_mask_vx_nxv2i64_nxv2i64_i64(<vscale x
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vaadd.vv v8, v10, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
@@ -1999,8 +1999,8 @@ define <vscale x 4 x i64> @intrinsic_vaadd_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vaadd.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
@@ -2034,8 +2034,8 @@ define <vscale x 4 x i64> @intrinsic_vaadd_mask_vx_nxv4i64_nxv4i64_i64(<vscale x
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vaadd.vv v8, v12, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
@@ -2069,8 +2069,8 @@ define <vscale x 8 x i64> @intrinsic_vaadd_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vaadd.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
@@ -2104,8 +2104,8 @@ define <vscale x 8 x i64> @intrinsic_vaadd_mask_vx_nxv8i64_nxv8i64_i64(<vscale x
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vaadd.vv v8, v16, v24, v0.t
; RV32-NEXT: addi sp, sp, 16
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vaaddu.ll
@@ -1859,8 +1859,8 @@ define <vscale x 1 x i64> @intrinsic_vaaddu_vx_nxv1i64_nxv1i64_i64(<vscale x 1 x
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vaaddu.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
@@ -1894,8 +1894,8 @@ define <vscale x 1 x i64> @intrinsic_vaaddu_mask_vx_nxv1i64_nxv1i64_i64(<vscale
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vaaddu.vv v8, v9, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
@@ -1929,8 +1929,8 @@ define <vscale x 2 x i64> @intrinsic_vaaddu_vx_nxv2i64_nxv2i64_i64(<vscale x 2 x
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vaaddu.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
@@ -1964,8 +1964,8 @@ define <vscale x 2 x i64> @intrinsic_vaaddu_mask_vx_nxv2i64_nxv2i64_i64(<vscale
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vaaddu.vv v8, v10, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
@@ -1999,8 +1999,8 @@ define <vscale x 4 x i64> @intrinsic_vaaddu_vx_nxv4i64_nxv4i64_i64(<vscale x 4 x
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vaaddu.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
@@ -2034,8 +2034,8 @@ define <vscale x 4 x i64> @intrinsic_vaaddu_mask_vx_nxv4i64_nxv4i64_i64(<vscale
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vaaddu.vv v8, v12, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
@@ -2069,8 +2069,8 @@ define <vscale x 8 x i64> @intrinsic_vaaddu_vx_nxv8i64_nxv8i64_i64(<vscale x 8 x
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vaaddu.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
@@ -2104,8 +2104,8 @@ define <vscale x 8 x i64> @intrinsic_vaaddu_mask_vx_nxv8i64_nxv8i64_i64(<vscale
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v24, (a0), zero
; RV32-NEXT: vaaddu.vv v8, v16, v24, v0.t
; RV32-NEXT: addi sp, sp, 16
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/RISCV/rvv/vadc-rv32.ll
@@ -974,8 +974,8 @@ define <vscale x 1 x i64> @intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT: vlse64.v v9, (a0), zero
; CHECK-NEXT: vadc.vvm v8, v8, v9, v0
; CHECK-NEXT: addi sp, sp, 16
@@ -1004,8 +1004,8 @@ define <vscale x 2 x i64> @intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: vadc.vvm v8, v8, v10, v0
; CHECK-NEXT: addi sp, sp, 16
@@ -1034,8 +1034,8 @@ define <vscale x 4 x i64> @intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT: vlse64.v v12, (a0), zero
; CHECK-NEXT: vadc.vvm v8, v8, v12, v0
; CHECK-NEXT: addi sp, sp, 16
@@ -1064,8 +1064,8 @@ define <vscale x 8 x i64> @intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: sw a1, 12(sp)
; CHECK-NEXT: sw a0, 8(sp)
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: addi a0, sp, 8
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT: vlse64.v v16, (a0), zero
; CHECK-NEXT: vadc.vvm v8, v8, v16, v0
; CHECK-NEXT: addi sp, sp, 16
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll
@@ -672,8 +672,8 @@ define <vscale x 1 x i64> @vadd_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vadd.vv v8, v8, v9
; RV32-NEXT: addi sp, sp, 16
@@ -721,8 +721,8 @@ define <vscale x 2 x i64> @vadd_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vadd.vv v8, v8, v10
; RV32-NEXT: addi sp, sp, 16
@@ -770,8 +770,8 @@ define <vscale x 4 x i64> @vadd_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vadd.vv v8, v8, v12
; RV32-NEXT: addi sp, sp, 16
@@ -819,8 +819,8 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vadd.vv v8, v8, v16
; RV32-NEXT: addi sp, sp, 16
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
@@ -1710,8 +1710,8 @@ define <vscale x 1 x i64> @vadd_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vsca
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
@@ -1736,8 +1736,8 @@ define <vscale x 1 x i64> @vadd_vx_nxv1i64_unmasked(<vscale x 1 x i64> %va, i64
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, mu
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: vadd.vv v8, v8, v9
@@ -1814,8 +1814,8 @@ define <vscale x 2 x i64> @vadd_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vsca
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vadd.vv v8, v8, v10, v0.t
@@ -1840,8 +1840,8 @@ define <vscale x 2 x i64> @vadd_vx_nxv2i64_unmasked(<vscale x 2 x i64> %va, i64
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, mu
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT: vadd.vv v8, v8, v10
@@ -1918,8 +1918,8 @@ define <vscale x 4 x i64> @vadd_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vsca
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vadd.vv v8, v8, v12, v0.t
@@ -1944,8 +1944,8 @@ define <vscale x 4 x i64> @vadd_vx_nxv4i64_unmasked(<vscale x 4 x i64> %va, i64
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m4, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, mu
; RV32-NEXT: vlse64.v v12, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT: vadd.vv v8, v8, v12
@@ -2022,8 +2022,8 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vsca
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
@@ -2048,8 +2048,8 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va, i64
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, mu
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT: vadd.vv v8, v8, v16