llvm/test/CodeGen/RISCV/rvv/vsplats-i64.ll: 50 additions & 40 deletions
@@ -60,13 +60,16 @@ define <vscale x 8 x i64> @vsplat_nxv8i64_3() {
 define <vscale x 8 x i64> @vsplat_nxv8i64_4() {
 ; RV32V-LABEL: vsplat_nxv8i64_4:
 ; RV32V: # %bb.0:
+; RV32V-NEXT: addi sp, sp, -16
+; RV32V-NEXT: .cfi_def_cfa_offset 16
+; RV32V-NEXT: sw zero, 12(sp)
 ; RV32V-NEXT: lui a0, 1028096
 ; RV32V-NEXT: addi a0, a0, -1281
-; RV32V-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; RV32V-NEXT: vmv.v.x v8, a0
-; RV32V-NEXT: addi a0, zero, 32
-; RV32V-NEXT: vsll.vx v8, v8, a0
-; RV32V-NEXT: vsrl.vx v8, v8, a0
+; RV32V-NEXT: sw a0, 8(sp)
+; RV32V-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; RV32V-NEXT: addi a0, sp, 8
+; RV32V-NEXT: vlse64.v v8, (a0), zero
+; RV32V-NEXT: addi sp, sp, 16
 ; RV32V-NEXT: ret
 ;
 ; RV64V-LABEL: vsplat_nxv8i64_4:
@@ -85,14 +88,14 @@ define <vscale x 8 x i64> @vsplat_nxv8i64_4() {
 define <vscale x 8 x i64> @vsplat_nxv8i64_5(i64 %a) {
 ; RV32V-LABEL: vsplat_nxv8i64_5:
 ; RV32V: # %bb.0:
-; RV32V-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; RV32V-NEXT: vmv.v.x v8, a1
-; RV32V-NEXT: addi a1, zero, 32
-; RV32V-NEXT: vsll.vx v8, v8, a1
-; RV32V-NEXT: vmv.v.x v16, a0
-; RV32V-NEXT: vsll.vx v16, v16, a1
-; RV32V-NEXT: vsrl.vx v16, v16, a1
-; RV32V-NEXT: vor.vv v8, v16, v8
+; RV32V-NEXT: addi sp, sp, -16
+; RV32V-NEXT: .cfi_def_cfa_offset 16
+; RV32V-NEXT: sw a1, 12(sp)
+; RV32V-NEXT: sw a0, 8(sp)
+; RV32V-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; RV32V-NEXT: addi a0, sp, 8
+; RV32V-NEXT: vlse64.v v8, (a0), zero
+; RV32V-NEXT: addi sp, sp, 16
 ; RV32V-NEXT: ret
 ;
 ; RV64V-LABEL: vsplat_nxv8i64_5:
@@ -186,14 +189,17 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_9(<vscale x 8 x i64> %v) {
 define <vscale x 8 x i64> @vadd_vx_nxv8i64_10(<vscale x 8 x i64> %v) {
 ; RV32V-LABEL: vadd_vx_nxv8i64_10:
 ; RV32V: # %bb.0:
+; RV32V-NEXT: addi sp, sp, -16
+; RV32V-NEXT: .cfi_def_cfa_offset 16
+; RV32V-NEXT: sw zero, 12(sp)
 ; RV32V-NEXT: lui a0, 1028096
 ; RV32V-NEXT: addi a0, a0, -1281
-; RV32V-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; RV32V-NEXT: vmv.v.x v16, a0
-; RV32V-NEXT: addi a0, zero, 32
-; RV32V-NEXT: vsll.vx v16, v16, a0
-; RV32V-NEXT: vsrl.vx v16, v16, a0
+; RV32V-NEXT: sw a0, 8(sp)
+; RV32V-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; RV32V-NEXT: addi a0, sp, 8
+; RV32V-NEXT: vlse64.v v16, (a0), zero
 ; RV32V-NEXT: vadd.vv v8, v8, v16
+; RV32V-NEXT: addi sp, sp, 16
 ; RV32V-NEXT: ret
 ;
 ; RV64V-LABEL: vadd_vx_nxv8i64_10:
@@ -213,17 +219,18 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_10(<vscale x 8 x i64> %v) {
 define <vscale x 8 x i64> @vadd_vx_nxv8i64_11(<vscale x 8 x i64> %v) {
 ; RV32V-LABEL: vadd_vx_nxv8i64_11:
 ; RV32V: # %bb.0:
+; RV32V-NEXT: addi sp, sp, -16
+; RV32V-NEXT: .cfi_def_cfa_offset 16
+; RV32V-NEXT: addi a0, zero, 1
+; RV32V-NEXT: sw a0, 12(sp)
+; RV32V-NEXT: lui a0, 1028096
+; RV32V-NEXT: addi a0, a0, -1281
+; RV32V-NEXT: sw a0, 8(sp)
 ; RV32V-NEXT: vsetvli a0, zero, e64,m8,ta,mu
-; RV32V-NEXT: vmv.v.i v16, 1
-; RV32V-NEXT: addi a0, zero, 32
-; RV32V-NEXT: vsll.vx v16, v16, a0
-; RV32V-NEXT: lui a1, 1028096
-; RV32V-NEXT: addi a1, a1, -1281
-; RV32V-NEXT: vmv.v.x v24, a1
-; RV32V-NEXT: vsll.vx v24, v24, a0
-; RV32V-NEXT: vsrl.vx v24, v24, a0
-; RV32V-NEXT: vor.vv v16, v24, v16
+; RV32V-NEXT: addi a0, sp, 8
+; RV32V-NEXT: vlse64.v v16, (a0), zero
 ; RV32V-NEXT: vadd.vv v8, v8, v16
+; RV32V-NEXT: addi sp, sp, 16
 ; RV32V-NEXT: ret
 ;
 ; RV64V-LABEL: vadd_vx_nxv8i64_11:
@@ -243,15 +250,15 @@ define <vscale x 8 x i64> @vadd_vx_nxv8i64_11(<vscale x 8 x i64> %v) {
 define <vscale x 8 x i64> @vadd_vx_nxv8i64_12(<vscale x 8 x i64> %v, i64 %a) {
 ; RV32V-LABEL: vadd_vx_nxv8i64_12:
 ; RV32V: # %bb.0:
-; RV32V-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; RV32V-NEXT: vmv.v.x v16, a1
-; RV32V-NEXT: addi a1, zero, 32
-; RV32V-NEXT: vsll.vx v16, v16, a1
-; RV32V-NEXT: vmv.v.x v24, a0
-; RV32V-NEXT: vsll.vx v24, v24, a1
-; RV32V-NEXT: vsrl.vx v24, v24, a1
-; RV32V-NEXT: vor.vv v16, v24, v16
+; RV32V-NEXT: addi sp, sp, -16
+; RV32V-NEXT: .cfi_def_cfa_offset 16
+; RV32V-NEXT: sw a1, 12(sp)
+; RV32V-NEXT: sw a0, 8(sp)
+; RV32V-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; RV32V-NEXT: addi a0, sp, 8
+; RV32V-NEXT: vlse64.v v16, (a0), zero
 ; RV32V-NEXT: vadd.vv v8, v8, v16
+; RV32V-NEXT: addi sp, sp, 16
 ; RV32V-NEXT: ret
 ;
 ; RV64V-LABEL: vadd_vx_nxv8i64_12:
@@ -287,11 +294,14 @@ define <vscale x 8 x i64> @vsplat_nxv8i64_13(i32 %a) {
 define <vscale x 8 x i64> @vsplat_nxv8i64_14(i32 %a) {
 ; RV32V-LABEL: vsplat_nxv8i64_14:
 ; RV32V: # %bb.0:
-; RV32V-NEXT: vsetvli a1, zero, e64,m8,ta,mu
-; RV32V-NEXT: vmv.v.x v8, a0
-; RV32V-NEXT: addi a0, zero, 32
-; RV32V-NEXT: vsll.vx v8, v8, a0
-; RV32V-NEXT: vsrl.vx v8, v8, a0
+; RV32V-NEXT: addi sp, sp, -16
+; RV32V-NEXT: .cfi_def_cfa_offset 16
+; RV32V-NEXT: sw zero, 12(sp)
+; RV32V-NEXT: sw a0, 8(sp)
+; RV32V-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; RV32V-NEXT: addi a0, sp, 8
+; RV32V-NEXT: vlse64.v v8, (a0), zero
+; RV32V-NEXT: addi sp, sp, 16
 ; RV32V-NEXT: ret
 ;
 ; RV64V-LABEL: vsplat_nxv8i64_14:
llvm/test/CodeGen/RISCV/rvv/vsra-sdnode-rv32.ll: 32 additions & 32 deletions
@@ -626,15 +626,15 @@ define <vscale x 1 x i64> @vsra_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
 define <vscale x 1 x i64> @vsra_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 ; CHECK-LABEL: vsra_vx_nxv1i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64,m1,ta,mu
-; CHECK-NEXT: vmv.v.x v25, a1
-; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsll.vx v25, v25, a1
-; CHECK-NEXT: vmv.v.x v26, a0
-; CHECK-NEXT: vsll.vx v26, v26, a1
-; CHECK-NEXT: vsrl.vx v26, v26, a1
-; CHECK-NEXT: vor.vv v25, v26, v25
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vlse64.v v25, (a0), zero
 ; CHECK-NEXT: vsra.vv v8, v8, v25
+; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
 %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
@@ -680,15 +680,15 @@ define <vscale x 2 x i64> @vsra_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
 define <vscale x 2 x i64> @vsra_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 ; CHECK-LABEL: vsra_vx_nxv2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64,m2,ta,mu
-; CHECK-NEXT: vmv.v.x v26, a1
-; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsll.vx v26, v26, a1
-; CHECK-NEXT: vmv.v.x v28, a0
-; CHECK-NEXT: vsll.vx v28, v28, a1
-; CHECK-NEXT: vsrl.vx v28, v28, a1
-; CHECK-NEXT: vor.vv v26, v28, v26
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vlse64.v v26, (a0), zero
 ; CHECK-NEXT: vsra.vv v8, v8, v26
+; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
 %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
@@ -734,15 +734,15 @@ define <vscale x 4 x i64> @vsra_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
 define <vscale x 4 x i64> @vsra_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 ; CHECK-LABEL: vsra_vx_nxv4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
-; CHECK-NEXT: vmv.v.x v28, a1
-; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsll.vx v28, v28, a1
-; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsll.vx v12, v12, a1
-; CHECK-NEXT: vsrl.vx v12, v12, a1
-; CHECK-NEXT: vor.vv v28, v12, v28
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vlse64.v v28, (a0), zero
 ; CHECK-NEXT: vsra.vv v8, v8, v28
+; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
 %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
@@ -788,15 +788,15 @@ define <vscale x 8 x i64> @vsra_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
 define <vscale x 8 x i64> @vsra_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; CHECK-LABEL: vsra_vx_nxv8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmv.v.x v16, a1
-; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsll.vx v16, v16, a1
-; CHECK-NEXT: vmv.v.x v24, a0
-; CHECK-NEXT: vsll.vx v24, v24, a1
-; CHECK-NEXT: vsrl.vx v24, v24, a1
-; CHECK-NEXT: vor.vv v16, v24, v16
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vlse64.v v16, (a0), zero
 ; CHECK-NEXT: vsra.vv v8, v8, v16
+; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
llvm/test/CodeGen/RISCV/rvv/vsrl-sdnode-rv32.ll: 32 additions & 32 deletions
@@ -436,15 +436,15 @@ define <vscale x 16 x i32> @vsrl_vx_nxv16i32_0(<vscale x 16 x i32> %va) {
 define <vscale x 1 x i64> @vsrl_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 ; CHECK-LABEL: vsrl_vx_nxv1i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64,m1,ta,mu
-; CHECK-NEXT: vmv.v.x v25, a1
-; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsll.vx v25, v25, a1
-; CHECK-NEXT: vmv.v.x v26, a0
-; CHECK-NEXT: vsll.vx v26, v26, a1
-; CHECK-NEXT: vsrl.vx v26, v26, a1
-; CHECK-NEXT: vor.vv v25, v26, v25
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vlse64.v v25, (a0), zero
 ; CHECK-NEXT: vsrl.vv v8, v8, v25
+; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
 %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
@@ -480,15 +480,15 @@ define <vscale x 1 x i64> @vsrl_vx_nxv1i64_1(<vscale x 1 x i64> %va) {
 define <vscale x 2 x i64> @vsrl_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 ; CHECK-LABEL: vsrl_vx_nxv2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64,m2,ta,mu
-; CHECK-NEXT: vmv.v.x v26, a1
-; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsll.vx v26, v26, a1
-; CHECK-NEXT: vmv.v.x v28, a0
-; CHECK-NEXT: vsll.vx v28, v28, a1
-; CHECK-NEXT: vsrl.vx v28, v28, a1
-; CHECK-NEXT: vor.vv v26, v28, v26
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vlse64.v v26, (a0), zero
 ; CHECK-NEXT: vsrl.vv v8, v8, v26
+; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
 %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
@@ -524,15 +524,15 @@ define <vscale x 2 x i64> @vsrl_vx_nxv2i64_1(<vscale x 2 x i64> %va) {
 define <vscale x 4 x i64> @vsrl_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 ; CHECK-LABEL: vsrl_vx_nxv4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
-; CHECK-NEXT: vmv.v.x v28, a1
-; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsll.vx v28, v28, a1
-; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsll.vx v12, v12, a1
-; CHECK-NEXT: vsrl.vx v12, v12, a1
-; CHECK-NEXT: vor.vv v28, v12, v28
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vlse64.v v28, (a0), zero
 ; CHECK-NEXT: vsrl.vv v8, v8, v28
+; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
 %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
@@ -568,15 +568,15 @@ define <vscale x 4 x i64> @vsrl_vx_nxv4i64_1(<vscale x 4 x i64> %va) {
 define <vscale x 8 x i64> @vsrl_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; CHECK-LABEL: vsrl_vx_nxv8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmv.v.x v16, a1
-; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsll.vx v16, v16, a1
-; CHECK-NEXT: vmv.v.x v24, a0
-; CHECK-NEXT: vsll.vx v24, v24, a1
-; CHECK-NEXT: vsrl.vx v24, v24, a1
-; CHECK-NEXT: vor.vv v16, v24, v16
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vlse64.v v16, (a0), zero
 ; CHECK-NEXT: vsrl.vv v8, v8, v16
+; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
llvm/test/CodeGen/RISCV/rvv/vsub-sdnode-rv32.ll: 32 additions & 32 deletions
@@ -659,15 +659,15 @@ define <vscale x 1 x i64> @vsub_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
 define <vscale x 1 x i64> @vsub_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 ; CHECK-LABEL: vsub_vx_nxv1i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64,m1,ta,mu
-; CHECK-NEXT: vmv.v.x v25, a1
-; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsll.vx v25, v25, a1
-; CHECK-NEXT: vmv.v.x v26, a0
-; CHECK-NEXT: vsll.vx v26, v26, a1
-; CHECK-NEXT: vsrl.vx v26, v26, a1
-; CHECK-NEXT: vor.vv v25, v26, v25
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vlse64.v v25, (a0), zero
 ; CHECK-NEXT: vsub.vv v8, v8, v25
+; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
 %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
@@ -701,15 +701,15 @@ define <vscale x 2 x i64> @vsub_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
 define <vscale x 2 x i64> @vsub_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 ; CHECK-LABEL: vsub_vx_nxv2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64,m2,ta,mu
-; CHECK-NEXT: vmv.v.x v26, a1
-; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsll.vx v26, v26, a1
-; CHECK-NEXT: vmv.v.x v28, a0
-; CHECK-NEXT: vsll.vx v28, v28, a1
-; CHECK-NEXT: vsrl.vx v28, v28, a1
-; CHECK-NEXT: vor.vv v26, v28, v26
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vlse64.v v26, (a0), zero
 ; CHECK-NEXT: vsub.vv v8, v8, v26
+; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
 %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
@@ -743,15 +743,15 @@ define <vscale x 4 x i64> @vsub_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
 define <vscale x 4 x i64> @vsub_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 ; CHECK-LABEL: vsub_vx_nxv4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
-; CHECK-NEXT: vmv.v.x v28, a1
-; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsll.vx v28, v28, a1
-; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsll.vx v12, v12, a1
-; CHECK-NEXT: vsrl.vx v12, v12, a1
-; CHECK-NEXT: vor.vv v28, v12, v28
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vlse64.v v28, (a0), zero
 ; CHECK-NEXT: vsub.vv v8, v8, v28
+; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
 %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
@@ -785,15 +785,15 @@ define <vscale x 8 x i64> @vsub_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
 define <vscale x 8 x i64> @vsub_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; CHECK-LABEL: vsub_vx_nxv8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmv.v.x v16, a1
-; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsll.vx v16, v16, a1
-; CHECK-NEXT: vmv.v.x v24, a0
-; CHECK-NEXT: vsll.vx v24, v24, a1
-; CHECK-NEXT: vsrl.vx v24, v24, a1
-; CHECK-NEXT: vor.vv v16, v24, v16
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vlse64.v v16, (a0), zero
 ; CHECK-NEXT: vsub.vv v8, v8, v16
+; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
llvm/test/CodeGen/RISCV/rvv/vxor-sdnode-rv32.ll: 32 additions & 32 deletions
@@ -1076,15 +1076,15 @@ define <vscale x 1 x i64> @vxor_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x
 define <vscale x 1 x i64> @vxor_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
 ; CHECK-LABEL: vxor_vx_nxv1i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64,m1,ta,mu
-; CHECK-NEXT: vmv.v.x v25, a1
-; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsll.vx v25, v25, a1
-; CHECK-NEXT: vmv.v.x v26, a0
-; CHECK-NEXT: vsll.vx v26, v26, a1
-; CHECK-NEXT: vsrl.vx v26, v26, a1
-; CHECK-NEXT: vor.vv v25, v26, v25
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vlse64.v v25, (a0), zero
 ; CHECK-NEXT: vxor.vv v8, v8, v25
+; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
 %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
@@ -1142,15 +1142,15 @@ define <vscale x 2 x i64> @vxor_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x
 define <vscale x 2 x i64> @vxor_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
 ; CHECK-LABEL: vxor_vx_nxv2i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64,m2,ta,mu
-; CHECK-NEXT: vmv.v.x v26, a1
-; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsll.vx v26, v26, a1
-; CHECK-NEXT: vmv.v.x v28, a0
-; CHECK-NEXT: vsll.vx v28, v28, a1
-; CHECK-NEXT: vsrl.vx v28, v28, a1
-; CHECK-NEXT: vor.vv v26, v28, v26
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vlse64.v v26, (a0), zero
 ; CHECK-NEXT: vxor.vv v8, v8, v26
+; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
 %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
@@ -1208,15 +1208,15 @@ define <vscale x 4 x i64> @vxor_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x
 define <vscale x 4 x i64> @vxor_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
 ; CHECK-LABEL: vxor_vx_nxv4i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64,m4,ta,mu
-; CHECK-NEXT: vmv.v.x v28, a1
-; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsll.vx v28, v28, a1
-; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsll.vx v12, v12, a1
-; CHECK-NEXT: vsrl.vx v12, v12, a1
-; CHECK-NEXT: vor.vv v28, v12, v28
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vlse64.v v28, (a0), zero
 ; CHECK-NEXT: vxor.vv v8, v8, v28
+; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
 %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
@@ -1274,15 +1274,15 @@ define <vscale x 8 x i64> @vxor_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x
 define <vscale x 8 x i64> @vxor_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
 ; CHECK-LABEL: vxor_vx_nxv8i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu
-; CHECK-NEXT: vmv.v.x v16, a1
-; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsll.vx v16, v16, a1
-; CHECK-NEXT: vmv.v.x v24, a0
-; CHECK-NEXT: vsll.vx v24, v24, a1
-; CHECK-NEXT: vsrl.vx v24, v24, a1
-; CHECK-NEXT: vor.vv v16, v24, v16
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: sw a1, 12(sp)
+; CHECK-NEXT: sw a0, 8(sp)
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vlse64.v v16, (a0), zero
 ; CHECK-NEXT: vxor.vv v8, v8, v16
+; CHECK-NEXT: addi sp, sp, 16
 ; CHECK-NEXT: ret
 %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
 %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer