16 changes: 8 additions & 8 deletions llvm/test/CodeGen/AArch64/i128_volatile_load_store.ll
@@ -38,11 +38,11 @@ define void @test3() {
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, x
; CHECK-NEXT: add x8, x8, :lo12:x
; CHECK-NEXT: add x8, x8, #512 // =512
; CHECK-NEXT: add x8, x8, #512
; CHECK-NEXT: ldp x8, x9, [x8]
; CHECK-NEXT: adrp x10, y
; CHECK-NEXT: add x10, x10, :lo12:y
; CHECK-NEXT: add x10, x10, #512 // =512
; CHECK-NEXT: add x10, x10, #512
; CHECK-NEXT: stp x8, x9, [x10]
; CHECK-NEXT: ret
%tmp = load volatile i128, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @x to i8*), i64 512) to i128*)
@@ -70,11 +70,11 @@ define void @test5() {
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, x
; CHECK-NEXT: add x8, x8, :lo12:x
; CHECK-NEXT: sub x8, x8, #520 // =520
; CHECK-NEXT: sub x8, x8, #520
; CHECK-NEXT: ldp x8, x9, [x8]
; CHECK-NEXT: adrp x10, y
; CHECK-NEXT: add x10, x10, :lo12:y
; CHECK-NEXT: sub x10, x10, #520 // =520
; CHECK-NEXT: sub x10, x10, #520
; CHECK-NEXT: stp x8, x9, [x10]
; CHECK-NEXT: ret
%tmp = load volatile i128, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @x to i8*), i64 -520) to i128*)
@@ -87,11 +87,11 @@ define void @test6() {
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, x
; CHECK-NEXT: add x8, x8, :lo12:x
; CHECK-NEXT: sub x8, x8, #520 // =520
; CHECK-NEXT: sub x8, x8, #520
; CHECK-NEXT: ldp x8, x9, [x8]
; CHECK-NEXT: adrp x10, y
; CHECK-NEXT: add x10, x10, :lo12:y
; CHECK-NEXT: sub x10, x10, #520 // =520
; CHECK-NEXT: sub x10, x10, #520
; CHECK-NEXT: stp x8, x9, [x10]
; CHECK-NEXT: ret
%tmp = load volatile i128, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @x to i8*), i64 -520) to i128*)
@@ -104,11 +104,11 @@ define void @test7() {
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, x
; CHECK-NEXT: add x8, x8, :lo12:x
; CHECK-NEXT: add x8, x8, #503 // =503
; CHECK-NEXT: add x8, x8, #503
; CHECK-NEXT: ldp x8, x9, [x8]
; CHECK-NEXT: adrp x10, y
; CHECK-NEXT: add x10, x10, :lo12:y
; CHECK-NEXT: add x10, x10, #503 // =503
; CHECK-NEXT: add x10, x10, #503
; CHECK-NEXT: stp x8, x9, [x10]
; CHECK-NEXT: ret
%tmp = load volatile i128, i128* bitcast (i8* getelementptr (i8, i8* bitcast (i128* @x to i8*), i64 503) to i128*)
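The pattern throughout this diff is the same: the auto-generated CHECK lines no longer expect the "// =<imm>" operand annotation after add/sub/cmp immediates. A minimal sketch of the new expected form, with a hypothetical function name and RUN line that are not taken from this patch:

; RUN: llc -mtriple=aarch64-- < %s | FileCheck %s

define i32 @add_imm_example(i32 %x) {
; CHECK-LABEL: add_imm_example:
; CHECK:       // %bb.0:
; CHECK-NEXT:    add w0, w0, #42
; CHECK-NEXT:    ret
  %r = add i32 %x, 42
  ret i32 %r
}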
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/AArch64/implicit-null-check.ll
@@ -284,7 +284,7 @@ define i32 @imp_null_check_gep_load_with_use_dep(i32* %x, i32 %a) {
; CHECK-NEXT: // %bb.1: // %not_null
; CHECK-NEXT: add w9, w0, w1
; CHECK-NEXT: add w8, w9, w8
; CHECK-NEXT: add w0, w8, #4 // =4
; CHECK-NEXT: add w0, w8, #4
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB11_2:
; CHECK-NEXT: mov w0, #42
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/AArch64/inc-of-add.ll
@@ -10,7 +10,7 @@ define i8 @scalar_i8(i8 %x, i8 %y) nounwind {
; CHECK-LABEL: scalar_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, w1
; CHECK-NEXT: add w0, w8, #1 // =1
; CHECK-NEXT: add w0, w8, #1
; CHECK-NEXT: ret
%t0 = add i8 %x, 1
%t1 = add i8 %y, %t0
@@ -21,7 +21,7 @@ define i16 @scalar_i16(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: scalar_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, w1
; CHECK-NEXT: add w0, w8, #1 // =1
; CHECK-NEXT: add w0, w8, #1
; CHECK-NEXT: ret
%t0 = add i16 %x, 1
%t1 = add i16 %y, %t0
@@ -32,7 +32,7 @@ define i32 @scalar_i32(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: scalar_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, w1
; CHECK-NEXT: add w0, w8, #1 // =1
; CHECK-NEXT: add w0, w8, #1
; CHECK-NEXT: ret
%t0 = add i32 %x, 1
%t1 = add i32 %y, %t0
@@ -43,7 +43,7 @@ define i64 @scalar_i64(i64 %x, i64 %y) nounwind {
; CHECK-LABEL: scalar_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: add x8, x0, x1
; CHECK-NEXT: add x0, x8, #1 // =1
; CHECK-NEXT: add x0, x8, #1
; CHECK-NEXT: ret
%t0 = add i64 %x, 1
%t1 = add i64 %y, %t0
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/AArch64/insert-subvector-res-legalization.ll
@@ -101,9 +101,9 @@ define <vscale x 8 x i8> @vec_scalable_subvec_fixed_idx_nonzero_i8(<vscale x 8 x
; CHECK-NEXT: ld1b { z0.h }, p0/z, [x0]
; CHECK-NEXT: ldr d1, [x1]
; CHECK-NEXT: cnth x9
; CHECK-NEXT: sub x9, x9, #8 // =8
; CHECK-NEXT: sub x9, x9, #8
; CHECK-NEXT: mov w8, #8
; CHECK-NEXT: cmp x9, #8 // =8
; CHECK-NEXT: cmp x9, #8
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: lsl x8, x8, #1
; CHECK-NEXT: st1h { z0.h }, p0, [sp]
@@ -150,9 +150,9 @@ define <vscale x 4 x i16> @vec_scalable_subvec_fixed_idx_nonzero_i16(<vscale x 4
; CHECK-NEXT: ld1h { z0.s }, p0/z, [x0]
; CHECK-NEXT: ldr d1, [x1]
; CHECK-NEXT: cntw x9
; CHECK-NEXT: sub x9, x9, #4 // =4
; CHECK-NEXT: sub x9, x9, #4
; CHECK-NEXT: mov w8, #4
; CHECK-NEXT: cmp x9, #4 // =4
; CHECK-NEXT: cmp x9, #4
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: lsl x8, x8, #2
; CHECK-NEXT: st1w { z0.s }, p0, [sp]
@@ -199,9 +199,9 @@ define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_nonzero_i32(<vscale x 2
; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
; CHECK-NEXT: ldr d1, [x1]
; CHECK-NEXT: cntd x9
; CHECK-NEXT: sub x9, x9, #2 // =2
; CHECK-NEXT: sub x9, x9, #2
; CHECK-NEXT: mov w8, #2
; CHECK-NEXT: cmp x9, #2 // =2
; CHECK-NEXT: cmp x9, #2
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: lsl x8, x8, #3
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
@@ -228,10 +228,10 @@ define <vscale x 2 x i32> @vec_scalable_subvec_fixed_idx_nonzero_large_i32(<vsca
; CHECK-NEXT: ld1w { z0.d }, p0/z, [x0]
; CHECK-NEXT: ld1w { z1.s }, p1/z, [x1]
; CHECK-NEXT: cntd x8
; CHECK-NEXT: subs x8, x8, #8 // =8
; CHECK-NEXT: subs x8, x8, #8
; CHECK-NEXT: csel x8, xzr, x8, lo
; CHECK-NEXT: mov w9, #8
; CHECK-NEXT: cmp x8, #8 // =8
; CHECK-NEXT: cmp x8, #8
; CHECK-NEXT: ptrue p1.d, vl8
; CHECK-NEXT: csel x8, x8, x9, lo
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
28 changes: 14 additions & 14 deletions llvm/test/CodeGen/AArch64/lack-of-signed-truncation-check.ll
@@ -273,7 +273,7 @@ define i1 @add_ugecmp_bad_i16_i8_add(i16 %x, i16 %y) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, w1
; CHECK-NEXT: and w8, w8, #0xffff
; CHECK-NEXT: cmp w8, #255 // =255
; CHECK-NEXT: cmp w8, #255
; CHECK-NEXT: cset w0, hi
; CHECK-NEXT: ret
%tmp0 = add i16 %x, %y
@@ -285,7 +285,7 @@ define i1 @add_ugecmp_bad_i16_i8_add(i16 %x, i16 %y) nounwind {
define i1 @add_ugecmp_bad_i16_i8_cmp(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: add_ugecmp_bad_i16_i8_cmp:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, #128 // =128
; CHECK-NEXT: add w8, w0, #128
; CHECK-NEXT: and w8, w8, #0xffff
; CHECK-NEXT: cmp w8, w1, uxth
; CHECK-NEXT: cset w0, hs
@@ -299,9 +299,9 @@ define i1 @add_ugecmp_bad_i16_i8_cmp(i16 %x, i16 %y) nounwind {
define i1 @add_ugecmp_bad_i8_i16(i16 %x) nounwind {
; CHECK-LABEL: add_ugecmp_bad_i8_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, #128 // =128
; CHECK-NEXT: add w8, w0, #128
; CHECK-NEXT: and w8, w8, #0xffff
; CHECK-NEXT: cmp w8, #127 // =127
; CHECK-NEXT: cmp w8, #127
; CHECK-NEXT: cset w0, hi
; CHECK-NEXT: ret
%tmp0 = add i16 %x, 128 ; 1U << (8-1)
@@ -313,9 +313,9 @@ define i1 @add_ugecmp_bad_i8_i16(i16 %x) nounwind {
define i1 @add_ugecmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind {
; CHECK-LABEL: add_ugecmp_bad_i16_i8_c0notpoweroftwo:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, #192 // =192
; CHECK-NEXT: add w8, w0, #192
; CHECK-NEXT: and w8, w8, #0xffff
; CHECK-NEXT: cmp w8, #255 // =255
; CHECK-NEXT: cmp w8, #255
; CHECK-NEXT: cset w0, hi
; CHECK-NEXT: ret
%tmp0 = add i16 %x, 192 ; (1U << (8-1)) + (1U << (8-1-1))
@@ -327,9 +327,9 @@ define i1 @add_ugecmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind {
define i1 @add_ugecmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind {
; CHECK-LABEL: add_ugecmp_bad_i16_i8_c1notpoweroftwo:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, #128 // =128
; CHECK-NEXT: add w8, w0, #128
; CHECK-NEXT: and w8, w8, #0xffff
; CHECK-NEXT: cmp w8, #767 // =767
; CHECK-NEXT: cmp w8, #767
; CHECK-NEXT: cset w0, hi
; CHECK-NEXT: ret
%tmp0 = add i16 %x, 128 ; 1U << (8-1)
@@ -341,9 +341,9 @@ define i1 @add_ugecmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind {
define i1 @add_ugecmp_bad_i16_i8_magic(i16 %x) nounwind {
; CHECK-LABEL: add_ugecmp_bad_i16_i8_magic:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, #64 // =64
; CHECK-NEXT: add w8, w0, #64
; CHECK-NEXT: and w8, w8, #0xffff
; CHECK-NEXT: cmp w8, #255 // =255
; CHECK-NEXT: cmp w8, #255
; CHECK-NEXT: cset w0, hi
; CHECK-NEXT: ret
%tmp0 = add i16 %x, 64 ; 1U << (8-1-1)
@@ -355,9 +355,9 @@ define i1 @add_ugecmp_bad_i16_i8_magic(i16 %x) nounwind {
define i1 @add_ugecmp_bad_i16_i4(i16 %x) nounwind {
; CHECK-LABEL: add_ugecmp_bad_i16_i4:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, #8 // =8
; CHECK-NEXT: add w8, w0, #8
; CHECK-NEXT: and w8, w8, #0xffff
; CHECK-NEXT: cmp w8, #15 // =15
; CHECK-NEXT: cmp w8, #15
; CHECK-NEXT: cset w0, hi
; CHECK-NEXT: ret
%tmp0 = add i16 %x, 8 ; 1U << (4-1)
@@ -369,9 +369,9 @@ define i1 @add_ugecmp_bad_i16_i4(i16 %x) nounwind {
define i1 @add_ugecmp_bad_i24_i8(i24 %x) nounwind {
; CHECK-LABEL: add_ugecmp_bad_i24_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, #128 // =128
; CHECK-NEXT: add w8, w0, #128
; CHECK-NEXT: and w8, w8, #0xffffff
; CHECK-NEXT: cmp w8, #255 // =255
; CHECK-NEXT: cmp w8, #255
; CHECK-NEXT: cset w0, hi
; CHECK-NEXT: ret
%tmp0 = add i24 %x, 128 ; 1U << (8-1)
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/AArch64/ldst-paired-aliasing.ll
@@ -12,20 +12,20 @@ define i32 @main() local_unnamed_addr #1 {
; Make sure the stores happen in the correct order (the exact instructions could change).
; CHECK-LABEL: main:
; CHECK: // %bb.0: // %for.body.lr.ph.i.i.i.i.i.i63
; CHECK-NEXT: sub sp, sp, #112 // =112
; CHECK-NEXT: sub sp, sp, #112
; CHECK-NEXT: str x30, [sp, #96] // 8-byte Folded Spill
; CHECK-NEXT: .cfi_def_cfa_offset 112
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: bl _Z5setupv
; CHECK-NEXT: movi v0.4s, #1
; CHECK-NEXT: mov w9, #1
; CHECK-NEXT: add x0, sp, #48 // =48
; CHECK-NEXT: add x0, sp, #48
; CHECK-NEXT: mov x1, sp
; CHECK-NEXT: str xzr, [sp, #80]
; CHECK-NEXT: str w9, [sp, #80]
; CHECK-NEXT: stp q0, q0, [sp, #48]
; CHECK-NEXT: ldr w8, [sp, #48]
; CHECK-NEXT: cmp w8, #1 // =1
; CHECK-NEXT: cmp w8, #1
; CHECK-NEXT: b.ne .LBB0_2
; CHECK-NEXT: // %bb.1: // %for.inc
; CHECK-NEXT: bl f
@@ -35,7 +35,7 @@ define i32 @main() local_unnamed_addr #1 {
; CHECK-NEXT: .LBB0_3: // %common.ret
; CHECK-NEXT: ldr x30, [sp, #96] // 8-byte Folded Reload
; CHECK-NEXT: mov w0, wzr
; CHECK-NEXT: add sp, sp, #112 // =112
; CHECK-NEXT: add sp, sp, #112
; CHECK-NEXT: ret


2 changes: 1 addition & 1 deletion llvm/test/CodeGen/AArch64/logical_shifted_reg.ll
@@ -266,7 +266,7 @@ define void @flag_setting() {
; CHECK-NEXT: b.lt .LBB2_4
; CHECK-NEXT: // %bb.2: // %test3
; CHECK-NEXT: and x10, x9, x10, asr #12
; CHECK-NEXT: cmp x10, #1 // =1
; CHECK-NEXT: cmp x10, #1
; CHECK-NEXT: b.ge .LBB2_4
; CHECK-NEXT: // %bb.3: // %other_exit
; CHECK-NEXT: str x9, [x8]
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AArch64/ls64-inline-asm.ll
@@ -42,7 +42,7 @@ entry:
define void @store2(i32* %in, i8* %addr) {
; CHECK-LABEL: store2:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: sub sp, sp, #64 // =64
; CHECK-NEXT: sub sp, sp, #64
; CHECK-NEXT: .cfi_def_cfa_offset 64
; CHECK-NEXT: ldpsw x2, x3, [x0]
; CHECK-NEXT: ldrsw x4, [x0, #16]
@@ -54,7 +54,7 @@ define void @store2(i32* %in, i8* %addr) {
; CHECK-NEXT: //APP
; CHECK-NEXT: st64b x2, [x1]
; CHECK-NEXT: //NO_APP
; CHECK-NEXT: add sp, sp, #64 // =64
; CHECK-NEXT: add sp, sp, #64
; CHECK-NEXT: ret
entry:
%0 = load i32, i32* %in, align 4
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
@@ -16,7 +16,7 @@ define i32 @sink_load_and_copy(i32 %n) {
; CHECK-NEXT: .cfi_offset w21, -24
; CHECK-NEXT: .cfi_offset w30, -32
; CHECK-NEXT: mov w19, w0
; CHECK-NEXT: cmp w0, #1 // =1
; CHECK-NEXT: cmp w0, #1
; CHECK-NEXT: b.lt .LBB0_3
; CHECK-NEXT: // %bb.1: // %for.body.preheader
; CHECK-NEXT: adrp x8, A
@@ -26,7 +26,7 @@ define i32 @sink_load_and_copy(i32 %n) {
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: mov w0, w21
; CHECK-NEXT: bl _Z3usei
; CHECK-NEXT: subs w19, w19, #1 // =1
; CHECK-NEXT: subs w19, w19, #1
; CHECK-NEXT: sdiv w20, w20, w0
; CHECK-NEXT: b.ne .LBB0_2
; CHECK-NEXT: b .LBB0_4
@@ -70,7 +70,7 @@ define i32 @cant_sink_successive_call(i32 %n) {
; CHECK-NEXT: .cfi_offset w21, -24
; CHECK-NEXT: .cfi_offset w30, -32
; CHECK-NEXT: mov w19, w0
; CHECK-NEXT: cmp w0, #1 // =1
; CHECK-NEXT: cmp w0, #1
; CHECK-NEXT: b.lt .LBB1_3
; CHECK-NEXT: // %bb.1: // %for.body.preheader
; CHECK-NEXT: adrp x8, A
@@ -82,7 +82,7 @@ define i32 @cant_sink_successive_call(i32 %n) {
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: mov w0, w20
; CHECK-NEXT: bl _Z3usei
; CHECK-NEXT: subs w19, w19, #1 // =1
; CHECK-NEXT: subs w19, w19, #1
; CHECK-NEXT: sdiv w21, w21, w0
; CHECK-NEXT: b.ne .LBB1_2
; CHECK-NEXT: b .LBB1_4
@@ -127,7 +127,7 @@ define i32 @cant_sink_successive_store(i32* nocapture readnone %store, i32 %n) {
; CHECK-NEXT: .cfi_offset w21, -24
; CHECK-NEXT: .cfi_offset w30, -32
; CHECK-NEXT: mov w19, w1
; CHECK-NEXT: cmp w1, #1 // =1
; CHECK-NEXT: cmp w1, #1
; CHECK-NEXT: b.lt .LBB2_3
; CHECK-NEXT: // %bb.1: // %for.body.preheader
; CHECK-NEXT: adrp x8, A
@@ -139,7 +139,7 @@ define i32 @cant_sink_successive_store(i32* nocapture readnone %store, i32 %n) {
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: mov w0, w20
; CHECK-NEXT: bl _Z3usei
; CHECK-NEXT: subs w19, w19, #1 // =1
; CHECK-NEXT: subs w19, w19, #1
; CHECK-NEXT: sdiv w21, w21, w0
; CHECK-NEXT: b.ne .LBB2_2
; CHECK-NEXT: b .LBB2_4
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/AArch64/machine-outliner-thunk.ll
@@ -13,7 +13,7 @@ define i32 @a() {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: bl [[OUTLINED_DIRECT:OUTLINED_FUNCTION_[0-9]+]]
; CHECK-NEXT: add w0, w0, #8 // =8
; CHECK-NEXT: add w0, w0, #8
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
entry:
@@ -29,7 +29,7 @@ define i32 @b() {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: bl [[OUTLINED_DIRECT]]
; CHECK-NEXT: add w0, w0, #88 // =88
; CHECK-NEXT: add w0, w0, #88
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
entry:
@@ -45,7 +45,7 @@ define hidden i32 @c(i32 (i32, i32, i32, i32)* %fptr) {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: bl [[OUTLINED_INDIRECT:OUTLINED_FUNCTION_[0-9]+]]
; CHECK-NEXT: add w0, w0, #8 // =8
; CHECK-NEXT: add w0, w0, #8
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
entry:
@@ -61,7 +61,7 @@ define hidden i32 @d(i32 (i32, i32, i32, i32)* %fptr) {
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: bl [[OUTLINED_INDIRECT]]
; CHECK-NEXT: add w0, w0, #88 // =88
; CHECK-NEXT: add w0, w0, #88
; CHECK-NEXT: ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT: ret
entry:
@@ -106,15 +106,15 @@ define <8 x i32> @reverse_v8i32(<8 x i32> %a) #0 {
;
; CHECK-FASTISEL-LABEL: reverse_v8i32:
; CHECK-FASTISEL: // %bb.0:
; CHECK-FASTISEL-NEXT: sub sp, sp, #16 // =16
; CHECK-FASTISEL-NEXT: sub sp, sp, #16
; CHECK-FASTISEL-NEXT: str q1, [sp] // 16-byte Folded Spill
; CHECK-FASTISEL-NEXT: mov v1.16b, v0.16b
; CHECK-FASTISEL-NEXT: ldr q0, [sp] // 16-byte Folded Reload
; CHECK-FASTISEL-NEXT: rev64 v0.4s, v0.4s
; CHECK-FASTISEL-NEXT: ext v0.16b, v0.16b, v0.16b, #8
; CHECK-FASTISEL-NEXT: rev64 v1.4s, v1.4s
; CHECK-FASTISEL-NEXT: ext v1.16b, v1.16b, v1.16b, #8
; CHECK-FASTISEL-NEXT: add sp, sp, #16 // =16
; CHECK-FASTISEL-NEXT: add sp, sp, #16
; CHECK-FASTISEL-NEXT: ret

%res = call <8 x i32> @llvm.experimental.vector.reverse.v8i32(<8 x i32> %a)
@@ -137,7 +137,7 @@ define <16 x float> @reverse_v16f32(<16 x float> %a) #0 {
;
; CHECK-FASTISEL-LABEL: reverse_v16f32:
; CHECK-FASTISEL: // %bb.0:
; CHECK-FASTISEL-NEXT: sub sp, sp, #32 // =32
; CHECK-FASTISEL-NEXT: sub sp, sp, #32
; CHECK-FASTISEL-NEXT: str q3, [sp, #16] // 16-byte Folded Spill
; CHECK-FASTISEL-NEXT: str q2, [sp] // 16-byte Folded Spill
; CHECK-FASTISEL-NEXT: mov v2.16b, v1.16b
Expand All @@ -152,7 +152,7 @@ define <16 x float> @reverse_v16f32(<16 x float> %a) #0 {
; CHECK-FASTISEL-NEXT: ext v2.16b, v2.16b, v2.16b, #8
; CHECK-FASTISEL-NEXT: rev64 v3.4s, v3.4s
; CHECK-FASTISEL-NEXT: ext v3.16b, v3.16b, v3.16b, #8
; CHECK-FASTISEL-NEXT: add sp, sp, #32 // =32
; CHECK-FASTISEL-NEXT: add sp, sp, #32
; CHECK-FASTISEL-NEXT: ret

%res = call <16 x float> @llvm.experimental.vector.reverse.v16f32(<16 x float> %a)
62 changes: 31 additions & 31 deletions llvm/test/CodeGen/AArch64/named-vector-shuffles-sve.ll
@@ -32,11 +32,11 @@ define <vscale x 16 x i8> @splice_nxv16i8_clamped_idx(<vscale x 16 x i8> %a, <vs
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: rdvl x9, #1
; CHECK-NEXT: sub x9, x9, #1 // =1
; CHECK-NEXT: sub x9, x9, #1
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w10, #16
; CHECK-NEXT: cmp x9, #16 // =16
; CHECK-NEXT: cmp x9, #16
; CHECK-NEXT: st1b { z0.b }, p0, [sp]
; CHECK-NEXT: st1b { z1.b }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x9, x10, lo
@@ -73,11 +73,11 @@ define <vscale x 8 x i16> @splice_nxv8i16_clamped_idx(<vscale x 8 x i16> %a, <vs
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: cnth x10
; CHECK-NEXT: sub x10, x10, #1 // =1
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w9, #8
; CHECK-NEXT: cmp x10, #8 // =8
; CHECK-NEXT: cmp x10, #8
; CHECK-NEXT: st1h { z0.h }, p0, [sp]
; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x10, x9, lo
@@ -114,11 +114,11 @@ define <vscale x 4 x i32> @splice_nxv4i32_clamped_idx(<vscale x 4 x i32> %a, <vs
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: cntw x10
; CHECK-NEXT: sub x10, x10, #1 // =1
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w9, #4
; CHECK-NEXT: cmp x10, #4 // =4
; CHECK-NEXT: cmp x10, #4
; CHECK-NEXT: st1w { z0.s }, p0, [sp]
; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x10, x9, lo
@@ -155,11 +155,11 @@ define <vscale x 2 x i64> @splice_nxv2i64_clamped_idx(<vscale x 2 x i64> %a, <vs
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: cntd x10
; CHECK-NEXT: sub x10, x10, #1 // =1
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w9, #2
; CHECK-NEXT: cmp x10, #2 // =2
; CHECK-NEXT: cmp x10, #2
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x10, x9, lo
@@ -227,9 +227,9 @@ define <vscale x 2 x half> @splice_nxv2f16_last_idx(<vscale x 2 x half> %a, <vsc
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: cntd x10
; CHECK-NEXT: sub x10, x10, #1 // =1
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: mov w9, #2
; CHECK-NEXT: cmp x10, #2 // =2
; CHECK-NEXT: cmp x10, #2
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: csel x9, x10, x9, lo
@@ -301,9 +301,9 @@ define <vscale x 4 x half> @splice_nxv4f16_last_idx(<vscale x 4 x half> %a, <vsc
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: cntw x10
; CHECK-NEXT: sub x10, x10, #1 // =1
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: mov w9, #4
; CHECK-NEXT: cmp x10, #4 // =4
; CHECK-NEXT: cmp x10, #4
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: csel x9, x10, x9, lo
@@ -345,11 +345,11 @@ define <vscale x 8 x half> @splice_nxv8f16_clamped_idx(<vscale x 8 x half> %a, <
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: cnth x10
; CHECK-NEXT: sub x10, x10, #1 // =1
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w9, #8
; CHECK-NEXT: cmp x10, #8 // =8
; CHECK-NEXT: cmp x10, #8
; CHECK-NEXT: st1h { z0.h }, p0, [sp]
; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x10, x9, lo
@@ -417,9 +417,9 @@ define <vscale x 2 x float> @splice_nxv2f32_last_idx(<vscale x 2 x float> %a, <v
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: cntd x10
; CHECK-NEXT: sub x10, x10, #1 // =1
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: mov w9, #2
; CHECK-NEXT: cmp x10, #2 // =2
; CHECK-NEXT: cmp x10, #2
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: csel x9, x10, x9, lo
@@ -460,11 +460,11 @@ define <vscale x 4 x float> @splice_nxv4f32_clamped_idx(<vscale x 4 x float> %a,
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: cntw x10
; CHECK-NEXT: sub x10, x10, #1 // =1
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w9, #4
; CHECK-NEXT: cmp x10, #4 // =4
; CHECK-NEXT: cmp x10, #4
; CHECK-NEXT: st1w { z0.s }, p0, [sp]
; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x10, x9, lo
@@ -501,11 +501,11 @@ define <vscale x 2 x double> @splice_nxv2f64_clamped_idx(<vscale x 2 x double> %
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-2
; CHECK-NEXT: cntd x10
; CHECK-NEXT: sub x10, x10, #1 // =1
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w9, #2
; CHECK-NEXT: cmp x10, #2 // =2
; CHECK-NEXT: cmp x10, #2
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x10, x9, lo
@@ -616,11 +616,11 @@ define <vscale x 16 x float> @splice_nxv16f32_clamped_idx(<vscale x 16 x float>
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-8
; CHECK-NEXT: rdvl x10, #1
; CHECK-NEXT: sub x10, x10, #1 // =1
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w9, #16
; CHECK-NEXT: cmp x10, #16 // =16
; CHECK-NEXT: cmp x10, #16
; CHECK-NEXT: st1w { z3.s }, p0, [x8, #3, mul vl]
; CHECK-NEXT: st1w { z2.s }, p0, [x8, #2, mul vl]
; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
@@ -687,7 +687,7 @@ define <vscale x 16 x i8> @splice_nxv16i8_clamped(<vscale x 16 x i8> %a, <vscale
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w10, #17
; CHECK-NEXT: cmp x9, #17 // =17
; CHECK-NEXT: cmp x9, #17
; CHECK-NEXT: st1b { z0.b }, p0, [sp]
; CHECK-NEXT: st1b { z1.b }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x9, x10, lo
@@ -742,7 +742,7 @@ define <vscale x 8 x i16> @splice_nxv8i16_clamped(<vscale x 8 x i16> %a, <vscale
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w10, #18
; CHECK-NEXT: cmp x9, #18 // =18
; CHECK-NEXT: cmp x9, #18
; CHECK-NEXT: st1h { z0.h }, p0, [sp]
; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x9, x10, lo
@@ -797,7 +797,7 @@ define <vscale x 4 x i32> @splice_nxv4i32_clamped(<vscale x 4 x i32> %a, <vscale
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w10, #20
; CHECK-NEXT: cmp x9, #20 // =20
; CHECK-NEXT: cmp x9, #20
; CHECK-NEXT: st1w { z0.s }, p0, [sp]
; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x9, x10, lo
@@ -852,7 +852,7 @@ define <vscale x 2 x i64> @splice_nxv2i64_clamped(<vscale x 2 x i64> %a, <vscale
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w10, #24
; CHECK-NEXT: cmp x9, #24 // =24
; CHECK-NEXT: cmp x9, #24
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x9, x10, lo
@@ -907,7 +907,7 @@ define <vscale x 8 x half> @splice_nxv8f16_clamped(<vscale x 8 x half> %a, <vsca
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w10, #18
; CHECK-NEXT: cmp x9, #18 // =18
; CHECK-NEXT: cmp x9, #18
; CHECK-NEXT: st1h { z0.h }, p0, [sp]
; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x9, x10, lo
@@ -962,7 +962,7 @@ define <vscale x 4 x float> @splice_nxv4f32_clamped(<vscale x 4 x float> %a, <vs
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w10, #20
; CHECK-NEXT: cmp x9, #20 // =20
; CHECK-NEXT: cmp x9, #20
; CHECK-NEXT: st1w { z0.s }, p0, [sp]
; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x9, x10, lo
@@ -1017,7 +1017,7 @@ define <vscale x 2 x double> @splice_nxv2f64_clamped(<vscale x 2 x double> %a, <
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w10, #24
; CHECK-NEXT: cmp x9, #24 // =24
; CHECK-NEXT: cmp x9, #24
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl]
; CHECK-NEXT: csel x9, x9, x10, lo
@@ -1130,7 +1130,7 @@ define <vscale x 8 x i32> @splice_nxv8i32(<vscale x 8 x i32> %a, <vscale x 8 x i
; CHECK-NEXT: st1w { z2.s }, p0, [x8, #2, mul vl]
; CHECK-NEXT: addvl x8, x8, #2
; CHECK-NEXT: ld1w { z0.s }, p0/z, [x8, x9, lsl #2]
; CHECK-NEXT: sub x8, x8, #32 // =32
; CHECK-NEXT: sub x8, x8, #32
; CHECK-NEXT: ld1w { z1.s }, p0/z, [x8, #1, mul vl]
; CHECK-NEXT: addvl sp, sp, #4
; CHECK-NEXT: ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -1149,7 +1149,7 @@ define <vscale x 16 x float> @splice_nxv16f32_clamped(<vscale x 16 x float> %a,
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w10, #68
; CHECK-NEXT: cmp x9, #68 // =68
; CHECK-NEXT: cmp x9, #68
; CHECK-NEXT: st1w { z3.s }, p0, [x8, #3, mul vl]
; CHECK-NEXT: st1w { z2.s }, p0, [x8, #2, mul vl]
; CHECK-NEXT: st1w { z1.s }, p0, [x8, #1, mul vl]
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/AArch64/neg-abs.ll
@@ -7,7 +7,7 @@ declare i64 @llvm.abs.i64(i64, i1 immarg)
define i64 @neg_abs64(i64 %x) {
; CHECK-LABEL: neg_abs64:
; CHECK: // %bb.0:
; CHECK-NEXT: cmp x0, #0 // =0
; CHECK-NEXT: cmp x0, #0
; CHECK-NEXT: cneg x8, x0, mi
; CHECK-NEXT: neg x0, x8
; CHECK-NEXT: ret
@@ -21,7 +21,7 @@ declare i32 @llvm.abs.i32(i32, i1 immarg)
define i32 @neg_abs32(i32 %x) {
; CHECK-LABEL: neg_abs32:
; CHECK: // %bb.0:
; CHECK-NEXT: cmp w0, #0 // =0
; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: cneg w8, w0, mi
; CHECK-NEXT: neg w0, w8
; CHECK-NEXT: ret
@@ -66,7 +66,7 @@ define i128 @neg_abs128(i128 %x) {
define i64 @abs64(i64 %x) {
; CHECK-LABEL: abs64:
; CHECK: // %bb.0:
; CHECK-NEXT: cmp x0, #0 // =0
; CHECK-NEXT: cmp x0, #0
; CHECK-NEXT: cneg x0, x0, mi
; CHECK-NEXT: ret
%abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
@@ -76,7 +76,7 @@ define i64 @abs64(i64 %x) {
define i32 @abs32(i32 %x) {
; CHECK-LABEL: abs32:
; CHECK: // %bb.0:
; CHECK-NEXT: cmp w0, #0 // =0
; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: cneg w0, w0, mi
; CHECK-NEXT: ret
%abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
@@ -87,7 +87,7 @@ define i16 @abs16(i16 %x) {
; CHECK-LABEL: abs16:
; CHECK: // %bb.0:
; CHECK-NEXT: sxth w8, w0
; CHECK-NEXT: cmp w8, #0 // =0
; CHECK-NEXT: cmp w8, #0
; CHECK-NEXT: cneg w0, w8, mi
; CHECK-NEXT: ret
%abs = tail call i16 @llvm.abs.i16(i16 %x, i1 true)
@@ -99,7 +99,7 @@ define i128 @abs128(i128 %x) {
; CHECK: // %bb.0:
; CHECK-NEXT: negs x8, x0
; CHECK-NEXT: ngcs x9, x1
; CHECK-NEXT: cmp x1, #0 // =0
; CHECK-NEXT: cmp x1, #0
; CHECK-NEXT: csel x0, x8, x0, lt
; CHECK-NEXT: csel x1, x9, x1, lt
; CHECK-NEXT: ret
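The compare forms above follow the same pattern: the expectation is now plain "cmp w0, #0" with no trailing "// =0". A reduced sketch reusing the @llvm.abs.i32 case from this file (the RUN line is an assumption, not copied from the patch):

; RUN: llc -mtriple=aarch64-- < %s | FileCheck %s

declare i32 @llvm.abs.i32(i32, i1 immarg)

define i32 @abs32_sketch(i32 %x) {
; CHECK-LABEL: abs32_sketch:
; CHECK:       // %bb.0:
; CHECK-NEXT:    cmp w0, #0
; CHECK-NEXT:    cneg w0, w0, mi
; CHECK-NEXT:    ret
  %abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
  ret i32 %abs
}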
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/AArch64/pow.ll
@@ -68,7 +68,7 @@ define double @pow_f64_one_fourth_not_enough_fmf(double %x) nounwind {
define <4 x float> @pow_v4f32_one_fourth_not_enough_fmf(<4 x float> %x) nounwind {
; CHECK-LABEL: pow_v4f32_one_fourth_not_enough_fmf:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #48 // =48
; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: str q0, [sp, #16] // 16-byte Folded Spill
; CHECK-NEXT: mov s0, v0.s[1]
; CHECK-NEXT: fmov s1, #0.25000000
@@ -100,7 +100,7 @@ define <4 x float> @pow_v4f32_one_fourth_not_enough_fmf(<4 x float> %x) nounwind
; CHECK-NEXT: // kill: def $s0 killed $s0 def $q0
; CHECK-NEXT: mov v1.s[3], v0.s[0]
; CHECK-NEXT: mov v0.16b, v1.16b
; CHECK-NEXT: add sp, sp, #48 // =48
; CHECK-NEXT: add sp, sp, #48
; CHECK-NEXT: ret
%r = call afn nsz <4 x float> @llvm.pow.v4f32(<4 x float> %x, <4 x float> <float 2.5e-1, float 2.5e-1, float 2.5e-01, float 2.5e-01>)
ret <4 x float> %r
@@ -109,7 +109,7 @@ define <4 x float> @pow_v4f32_one_fourth_not_enough_fmf(<4 x float> %x) nounwind
define <2 x double> @pow_v2f64_one_fourth_not_enough_fmf(<2 x double> %x) nounwind {
; CHECK-LABEL: pow_v2f64_one_fourth_not_enough_fmf:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #48 // =48
; CHECK-NEXT: sub sp, sp, #48
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
; CHECK-NEXT: mov d0, v0.d[1]
; CHECK-NEXT: fmov d1, #0.25000000
@@ -124,7 +124,7 @@ define <2 x double> @pow_v2f64_one_fourth_not_enough_fmf(<2 x double> %x) nounwi
; CHECK-NEXT: ldr x30, [sp, #32] // 8-byte Folded Reload
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: mov v0.d[1], v1.d[0]
; CHECK-NEXT: add sp, sp, #48 // =48
; CHECK-NEXT: add sp, sp, #48
; CHECK-NEXT: ret
%r = call nsz nnan reassoc <2 x double> @llvm.pow.v2f64(<2 x double> %x, <2 x double> <double 2.5e-1, double 2.5e-1>)
ret <2 x double> %r
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/AArch64/pr48188.ll
@@ -5,7 +5,7 @@
define void @test() nounwind {
; CHECK-LABEL: test:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: sub sp, sp, #16 // =16
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: mov x1, xzr
; CHECK-NEXT: mov x0, x1
; CHECK-NEXT: str x1, [sp] // 8-byte Folded Spill
6 changes: 3 additions & 3 deletions llvm/test/CodeGen/AArch64/ragreedy-local-interval-cost.ll
@@ -138,10 +138,10 @@ define dso_local void @run_test() local_unnamed_addr #0 {
; CHECK-NEXT: add v28.2d, v28.2d, v14.2d
; CHECK-NEXT: fmov d14, x17
; CHECK-NEXT: mov v14.d[1], x16
; CHECK-NEXT: add x8, x8, #8 // =8
; CHECK-NEXT: add x8, x8, #8
; CHECK-NEXT: add v27.2d, v27.2d, v14.2d
; CHECK-NEXT: cmp x8, #64 // =64
; CHECK-NEXT: add x9, x9, #1 // =1
; CHECK-NEXT: cmp x8, #64
; CHECK-NEXT: add x9, x9, #1
; CHECK-NEXT: b.ne .LBB0_1
; CHECK-NEXT: // %bb.2: // %for.cond.cleanup
; CHECK-NEXT: adrp x8, C
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/AArch64/sadd_sat.ll
@@ -13,7 +13,7 @@ define i32 @func(i32 %x, i32 %y) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: adds w8, w0, w1
; CHECK-NEXT: mov w9, #2147483647
; CHECK-NEXT: cmp w8, #0 // =0
; CHECK-NEXT: cmp w8, #0
; CHECK-NEXT: cinv w8, w9, ge
; CHECK-NEXT: adds w9, w0, w1
; CHECK-NEXT: csel w0, w8, w9, vs
@@ -27,7 +27,7 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: adds x8, x0, x1
; CHECK-NEXT: mov x9, #9223372036854775807
; CHECK-NEXT: cmp x8, #0 // =0
; CHECK-NEXT: cmp x8, #0
; CHECK-NEXT: cinv x8, x9, ge
; CHECK-NEXT: adds x9, x0, x1
; CHECK-NEXT: csel x0, x8, x9, vs
@@ -58,9 +58,9 @@ define i8 @func8(i8 %x, i8 %y) nounwind {
; CHECK-NEXT: sxtb w8, w0
; CHECK-NEXT: add w8, w8, w1, sxtb
; CHECK-NEXT: mov w9, #127
; CHECK-NEXT: cmp w8, #127 // =127
; CHECK-NEXT: cmp w8, #127
; CHECK-NEXT: csel w8, w8, w9, lt
; CHECK-NEXT: cmn w8, #128 // =128
; CHECK-NEXT: cmn w8, #128
; CHECK-NEXT: mov w9, #-128
; CHECK-NEXT: csel w0, w8, w9, gt
; CHECK-NEXT: ret
@@ -75,9 +75,9 @@ define i4 @func3(i4 %x, i4 %y) nounwind {
; CHECK-NEXT: sbfx w9, w0, #0, #4
; CHECK-NEXT: add w8, w9, w8, asr #28
; CHECK-NEXT: mov w10, #7
; CHECK-NEXT: cmp w8, #7 // =7
; CHECK-NEXT: cmp w8, #7
; CHECK-NEXT: csel w8, w8, w10, lt
; CHECK-NEXT: cmn w8, #8 // =8
; CHECK-NEXT: cmn w8, #8
; CHECK-NEXT: mov w9, #-8
; CHECK-NEXT: csel w0, w8, w9, gt
; CHECK-NEXT: ret
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/AArch64/sadd_sat_plus.ll
@@ -13,7 +13,7 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
; CHECK-NEXT: mul w8, w1, w2
; CHECK-NEXT: adds w10, w0, w8
; CHECK-NEXT: mov w9, #2147483647
; CHECK-NEXT: cmp w10, #0 // =0
; CHECK-NEXT: cmp w10, #0
; CHECK-NEXT: cinv w9, w9, ge
; CHECK-NEXT: adds w8, w0, w8
; CHECK-NEXT: csel w0, w9, w8, vs
@@ -28,7 +28,7 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: adds x8, x0, x2
; CHECK-NEXT: mov x9, #9223372036854775807
; CHECK-NEXT: cmp x8, #0 // =0
; CHECK-NEXT: cmp x8, #0
; CHECK-NEXT: cinv x8, x9, ge
; CHECK-NEXT: adds x9, x0, x2
; CHECK-NEXT: csel x0, x8, x9, vs
@@ -63,9 +63,9 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
; CHECK-NEXT: mul w9, w1, w2
; CHECK-NEXT: add w8, w8, w9, sxtb
; CHECK-NEXT: mov w10, #127
; CHECK-NEXT: cmp w8, #127 // =127
; CHECK-NEXT: cmp w8, #127
; CHECK-NEXT: csel w8, w8, w10, lt
; CHECK-NEXT: cmn w8, #128 // =128
; CHECK-NEXT: cmn w8, #128
; CHECK-NEXT: mov w9, #-128
; CHECK-NEXT: csel w0, w8, w9, gt
; CHECK-NEXT: ret
@@ -82,9 +82,9 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
; CHECK-NEXT: lsl w9, w9, #28
; CHECK-NEXT: add w8, w8, w9, asr #28
; CHECK-NEXT: mov w10, #7
; CHECK-NEXT: cmp w8, #7 // =7
; CHECK-NEXT: cmp w8, #7
; CHECK-NEXT: csel w8, w8, w10, lt
; CHECK-NEXT: cmn w8, #8 // =8
; CHECK-NEXT: cmn w8, #8
; CHECK-NEXT: mov w9, #-8
; CHECK-NEXT: csel w0, w8, w9, gt
; CHECK-NEXT: ret
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/AArch64/sadd_sat_vec.ll
@@ -135,8 +135,8 @@ define void @v2i8(<2 x i8>* %px, <2 x i8>* %py, <2 x i8>* %pz) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ld1 { v0.b }[0], [x1]
; CHECK-NEXT: ld1 { v1.b }[0], [x0]
; CHECK-NEXT: add x8, x0, #1 // =1
; CHECK-NEXT: add x9, x1, #1 // =1
; CHECK-NEXT: add x8, x0, #1
; CHECK-NEXT: add x9, x1, #1
; CHECK-NEXT: ld1 { v0.b }[4], [x9]
; CHECK-NEXT: ld1 { v1.b }[4], [x8]
; CHECK-NEXT: shl v0.2s, v0.2s, #24
@@ -175,8 +175,8 @@ define void @v2i16(<2 x i16>* %px, <2 x i16>* %py, <2 x i16>* %pz) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ld1 { v0.h }[0], [x1]
; CHECK-NEXT: ld1 { v1.h }[0], [x0]
; CHECK-NEXT: add x8, x0, #2 // =2
; CHECK-NEXT: add x9, x1, #2 // =2
; CHECK-NEXT: add x8, x0, #2
; CHECK-NEXT: add x9, x1, #2
; CHECK-NEXT: ld1 { v0.h }[2], [x9]
; CHECK-NEXT: ld1 { v1.h }[2], [x8]
; CHECK-NEXT: shl v0.2s, v0.2s, #16
@@ -354,7 +354,7 @@ define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
; CHECK-NEXT: adcs x12, x3, x7
; CHECK-NEXT: mov x9, #9223372036854775807
; CHECK-NEXT: eor x10, x3, x7
; CHECK-NEXT: cmp x12, #0 // =0
; CHECK-NEXT: cmp x12, #0
; CHECK-NEXT: eor x13, x3, x12
; CHECK-NEXT: cinv x14, x9, ge
; CHECK-NEXT: bics xzr, x13, x10
@@ -364,7 +364,7 @@ define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
; CHECK-NEXT: adds x8, x0, x4
; CHECK-NEXT: adcs x10, x1, x5
; CHECK-NEXT: eor x11, x1, x5
; CHECK-NEXT: cmp x10, #0 // =0
; CHECK-NEXT: cmp x10, #0
; CHECK-NEXT: eor x12, x1, x10
; CHECK-NEXT: cinv x9, x9, ge
; CHECK-NEXT: bics xzr, x12, x11
32 changes: 16 additions & 16 deletions llvm/test/CodeGen/AArch64/sat-add.ll
@@ -10,10 +10,10 @@ define i8 @unsigned_sat_constant_i8_using_min(i8 %x) {
; CHECK-LABEL: unsigned_sat_constant_i8_using_min:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xff
; CHECK-NEXT: cmp w8, #213 // =213
; CHECK-NEXT: cmp w8, #213
; CHECK-NEXT: mov w8, #-43
; CHECK-NEXT: csel w8, w0, w8, lo
; CHECK-NEXT: add w0, w8, #42 // =42
; CHECK-NEXT: add w0, w8, #42
; CHECK-NEXT: ret
%c = icmp ult i8 %x, -43
%s = select i1 %c, i8 %x, i8 -43
@@ -25,7 +25,7 @@ define i8 @unsigned_sat_constant_i8_using_cmp_sum(i8 %x) {
; CHECK-LABEL: unsigned_sat_constant_i8_using_cmp_sum:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xff
; CHECK-NEXT: add w8, w8, #42 // =42
; CHECK-NEXT: add w8, w8, #42
; CHECK-NEXT: tst w8, #0x100
; CHECK-NEXT: csinv w0, w8, wzr, eq
; CHECK-NEXT: ret
@@ -39,8 +39,8 @@ define i8 @unsigned_sat_constant_i8_using_cmp_notval(i8 %x) {
; CHECK-LABEL: unsigned_sat_constant_i8_using_cmp_notval:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xff
; CHECK-NEXT: add w9, w0, #42 // =42
; CHECK-NEXT: cmp w8, #213 // =213
; CHECK-NEXT: add w9, w0, #42
; CHECK-NEXT: cmp w8, #213
; CHECK-NEXT: csinv w0, w9, wzr, ls
; CHECK-NEXT: ret
%a = add i8 %x, 42
@@ -56,7 +56,7 @@ define i16 @unsigned_sat_constant_i16_using_min(i16 %x) {
; CHECK-NEXT: cmp w8, w0, uxth
; CHECK-NEXT: mov w8, #-43
; CHECK-NEXT: csel w8, w0, w8, hi
; CHECK-NEXT: add w0, w8, #42 // =42
; CHECK-NEXT: add w0, w8, #42
; CHECK-NEXT: ret
%c = icmp ult i16 %x, -43
%s = select i1 %c, i16 %x, i16 -43
@@ -68,7 +68,7 @@ define i16 @unsigned_sat_constant_i16_using_cmp_sum(i16 %x) {
; CHECK-LABEL: unsigned_sat_constant_i16_using_cmp_sum:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xffff
; CHECK-NEXT: add w8, w8, #42 // =42
; CHECK-NEXT: add w8, w8, #42
; CHECK-NEXT: tst w8, #0x10000
; CHECK-NEXT: csinv w0, w8, wzr, eq
; CHECK-NEXT: ret
@@ -82,7 +82,7 @@ define i16 @unsigned_sat_constant_i16_using_cmp_notval(i16 %x) {
; CHECK-LABEL: unsigned_sat_constant_i16_using_cmp_notval:
; CHECK: // %bb.0:
; CHECK-NEXT: mov w9, #65493
; CHECK-NEXT: add w8, w0, #42 // =42
; CHECK-NEXT: add w8, w0, #42
; CHECK-NEXT: cmp w9, w0, uxth
; CHECK-NEXT: csinv w0, w8, wzr, hs
; CHECK-NEXT: ret
@@ -95,10 +95,10 @@ define i16 @unsigned_sat_constant_i16_using_cmp_notval(i16 %x) {
define i32 @unsigned_sat_constant_i32_using_min(i32 %x) {
; CHECK-LABEL: unsigned_sat_constant_i32_using_min:
; CHECK: // %bb.0:
; CHECK-NEXT: cmn w0, #43 // =43
; CHECK-NEXT: cmn w0, #43
; CHECK-NEXT: mov w8, #-43
; CHECK-NEXT: csel w8, w0, w8, lo
; CHECK-NEXT: add w0, w8, #42 // =42
; CHECK-NEXT: add w0, w8, #42
; CHECK-NEXT: ret
%c = icmp ult i32 %x, -43
%s = select i1 %c, i32 %x, i32 -43
@@ -109,7 +109,7 @@ define i32 @unsigned_sat_constant_i32_using_min(i32 %x) {
define i32 @unsigned_sat_constant_i32_using_cmp_sum(i32 %x) {
; CHECK-LABEL: unsigned_sat_constant_i32_using_cmp_sum:
; CHECK: // %bb.0:
; CHECK-NEXT: adds w8, w0, #42 // =42
; CHECK-NEXT: adds w8, w0, #42
; CHECK-NEXT: csinv w0, w8, wzr, lo
; CHECK-NEXT: ret
%a = add i32 %x, 42
@@ -121,7 +121,7 @@ define i32 @unsigned_sat_constant_i32_using_cmp_sum(i32 %x) {
define i32 @unsigned_sat_constant_i32_using_cmp_notval(i32 %x) {
; CHECK-LABEL: unsigned_sat_constant_i32_using_cmp_notval:
; CHECK: // %bb.0:
; CHECK-NEXT: adds w8, w0, #42 // =42
; CHECK-NEXT: adds w8, w0, #42
; CHECK-NEXT: csinv w0, w8, wzr, lo
; CHECK-NEXT: ret
%a = add i32 %x, 42
@@ -133,10 +133,10 @@ define i32 @unsigned_sat_constant_i32_using_cmp_notval(i32 %x) {
define i64 @unsigned_sat_constant_i64_using_min(i64 %x) {
; CHECK-LABEL: unsigned_sat_constant_i64_using_min:
; CHECK: // %bb.0:
; CHECK-NEXT: cmn x0, #43 // =43
; CHECK-NEXT: cmn x0, #43
; CHECK-NEXT: mov x8, #-43
; CHECK-NEXT: csel x8, x0, x8, lo
; CHECK-NEXT: add x0, x8, #42 // =42
; CHECK-NEXT: add x0, x8, #42
; CHECK-NEXT: ret
%c = icmp ult i64 %x, -43
%s = select i1 %c, i64 %x, i64 -43
@@ -147,7 +147,7 @@ define i64 @unsigned_sat_constant_i64_using_min(i64 %x) {
define i64 @unsigned_sat_constant_i64_using_cmp_sum(i64 %x) {
; CHECK-LABEL: unsigned_sat_constant_i64_using_cmp_sum:
; CHECK: // %bb.0:
; CHECK-NEXT: adds x8, x0, #42 // =42
; CHECK-NEXT: adds x8, x0, #42
; CHECK-NEXT: csinv x0, x8, xzr, lo
; CHECK-NEXT: ret
%a = add i64 %x, 42
@@ -159,7 +159,7 @@ define i64 @unsigned_sat_constant_i64_using_cmp_sum(i64 %x) {
define i64 @unsigned_sat_constant_i64_using_cmp_notval(i64 %x) {
; CHECK-LABEL: unsigned_sat_constant_i64_using_cmp_notval:
; CHECK: // %bb.0:
; CHECK-NEXT: adds x8, x0, #42 // =42
; CHECK-NEXT: adds x8, x0, #42
; CHECK-NEXT: csinv x0, x8, xzr, lo
; CHECK-NEXT: ret
%a = add i64 %x, 42
32 changes: 16 additions & 16 deletions llvm/test/CodeGen/AArch64/sdivpow2.ll
@@ -5,8 +5,8 @@
define i32 @test1(i32 %x) {
; CHECK-LABEL: test1:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, #7 // =7
; CHECK-NEXT: cmp w0, #0 // =0
; CHECK-NEXT: add w8, w0, #7
; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: csel w8, w8, w0, lt
; CHECK-NEXT: asr w0, w8, #3
; CHECK-NEXT: ret
@@ -17,8 +17,8 @@ define i32 @test1(i32 %x) {
define i32 @test2(i32 %x) {
; CHECK-LABEL: test2:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, #7 // =7
; CHECK-NEXT: cmp w0, #0 // =0
; CHECK-NEXT: add w8, w0, #7
; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: csel w8, w8, w0, lt
; CHECK-NEXT: neg w0, w8, asr #3
; CHECK-NEXT: ret
@@ -29,8 +29,8 @@ define i32 @test2(i32 %x) {
define i32 @test3(i32 %x) {
; CHECK-LABEL: test3:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, #31 // =31
; CHECK-NEXT: cmp w0, #0 // =0
; CHECK-NEXT: add w8, w0, #31
; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: csel w8, w8, w0, lt
; CHECK-NEXT: asr w0, w8, #5
; CHECK-NEXT: ret
@@ -41,8 +41,8 @@ define i32 @test3(i32 %x) {
define i64 @test4(i64 %x) {
; CHECK-LABEL: test4:
; CHECK: // %bb.0:
; CHECK-NEXT: add x8, x0, #7 // =7
; CHECK-NEXT: cmp x0, #0 // =0
; CHECK-NEXT: add x8, x0, #7
; CHECK-NEXT: cmp x0, #0
; CHECK-NEXT: csel x8, x8, x0, lt
; CHECK-NEXT: asr x0, x8, #3
; CHECK-NEXT: ret
@@ -53,8 +53,8 @@ define i64 @test4(i64 %x) {
define i64 @test5(i64 %x) {
; CHECK-LABEL: test5:
; CHECK: // %bb.0:
; CHECK-NEXT: add x8, x0, #7 // =7
; CHECK-NEXT: cmp x0, #0 // =0
; CHECK-NEXT: add x8, x0, #7
; CHECK-NEXT: cmp x0, #0
; CHECK-NEXT: csel x8, x8, x0, lt
; CHECK-NEXT: neg x0, x8, asr #3
; CHECK-NEXT: ret
@@ -65,8 +65,8 @@ define i64 @test5(i64 %x) {
define i64 @test6(i64 %x) {
; CHECK-LABEL: test6:
; CHECK: // %bb.0:
; CHECK-NEXT: add x8, x0, #63 // =63
; CHECK-NEXT: cmp x0, #0 // =0
; CHECK-NEXT: add x8, x0, #63
; CHECK-NEXT: cmp x0, #0
; CHECK-NEXT: csel x8, x8, x0, lt
; CHECK-NEXT: asr x0, x8, #6
; CHECK-NEXT: ret
@@ -79,7 +79,7 @@ define i64 @test7(i64 %x) {
; CHECK: // %bb.0:
; CHECK-NEXT: mov x8, #281474976710655
; CHECK-NEXT: add x8, x0, x8
; CHECK-NEXT: cmp x0, #0 // =0
; CHECK-NEXT: cmp x0, #0
; CHECK-NEXT: csel x8, x8, x0, lt
; CHECK-NEXT: asr x0, x8, #48
; CHECK-NEXT: ret
@@ -90,15 +90,15 @@ define i64 @test7(i64 %x) {
define i64 @test8(i64 %x) {
; ISEL-LABEL: test8:
; ISEL: // %bb.0:
; ISEL-NEXT: cmp x0, #0 // =0
; ISEL-NEXT: cmp x0, #0
; ISEL-NEXT: cinc x8, x0, lt
; ISEL-NEXT: asr x0, x8, #1
; ISEL-NEXT: ret
;
; FAST-LABEL: test8:
; FAST: // %bb.0:
; FAST-NEXT: add x8, x0, #1 // =1
; FAST-NEXT: cmp x0, #0 // =0
; FAST-NEXT: add x8, x0, #1
; FAST-NEXT: cmp x0, #0
; FAST-NEXT: csel x8, x8, x0, lt
; FAST-NEXT: asr x0, x8, #1
; FAST-NEXT: ret
10 changes: 5 additions & 5 deletions llvm/test/CodeGen/AArch64/select_const.ll
@@ -69,7 +69,7 @@ define i32 @select_0_or_neg1(i1 %cond) {
; CHECK-LABEL: select_0_or_neg1:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0x1
; CHECK-NEXT: sub w0, w8, #1 // =1
; CHECK-NEXT: sub w0, w8, #1
; CHECK-NEXT: ret
%sel = select i1 %cond, i32 0, i32 -1
ret i32 %sel
@@ -78,7 +78,7 @@ define i32 @select_0_or_neg1(i1 %cond) {
define i32 @select_0_or_neg1_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_0_or_neg1_zeroext:
; CHECK: // %bb.0:
; CHECK-NEXT: sub w0, w0, #1 // =1
; CHECK-NEXT: sub w0, w0, #1
; CHECK-NEXT: ret
%sel = select i1 %cond, i32 0, i32 -1
ret i32 %sel
@@ -137,7 +137,7 @@ define i32 @select_Cplus1_C(i1 %cond) {
define i32 @select_Cplus1_C_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_Cplus1_C_zeroext:
; CHECK: // %bb.0:
; CHECK-NEXT: cmp w0, #0 // =0
; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: mov w8, #41
; CHECK-NEXT: cinc w0, w8, ne
; CHECK-NEXT: ret
@@ -172,7 +172,7 @@ define i32 @select_C_Cplus1(i1 %cond) {
define i32 @select_C_Cplus1_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_C_Cplus1_zeroext:
; CHECK: // %bb.0:
; CHECK-NEXT: cmp w0, #0 // =0
; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: mov w8, #41
; CHECK-NEXT: cinc w0, w8, eq
; CHECK-NEXT: ret
@@ -209,7 +209,7 @@ define i32 @select_C1_C2(i1 %cond) {
define i32 @select_C1_C2_zeroext(i1 zeroext %cond) {
; CHECK-LABEL: select_C1_C2_zeroext:
; CHECK: // %bb.0:
; CHECK-NEXT: cmp w0, #0 // =0
; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: mov w8, #42
; CHECK-NEXT: mov w9, #421
; CHECK-NEXT: csel w0, w9, w8, ne
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AArch64/shift-mod.ll
@@ -78,7 +78,7 @@ entry:
define i64 @ashr_add_shl_i32(i64 %r) {
; CHECK-LABEL: ashr_add_shl_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, #1 // =1
; CHECK-NEXT: add w8, w0, #1
; CHECK-NEXT: sxtw x0, w8
; CHECK-NEXT: ret
%conv = shl i64 %r, 32
@@ -90,7 +90,7 @@ define i64 @ashr_add_shl_i32(i64 %r) {
define i64 @ashr_add_shl_i8(i64 %r) {
; CHECK-LABEL: ashr_add_shl_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, #1 // =1
; CHECK-NEXT: add w8, w0, #1
; CHECK-NEXT: sxtb x0, w8
; CHECK-NEXT: ret
%conv = shl i64 %r, 56
20 changes: 10 additions & 10 deletions llvm/test/CodeGen/AArch64/signbit-shift.ll
@@ -18,7 +18,7 @@ define i32 @add_zext_ifpos(i32 %x) {
; CHECK-LABEL: add_zext_ifpos:
; CHECK: // %bb.0:
; CHECK-NEXT: asr w8, w0, #31
; CHECK-NEXT: add w0, w8, #42 // =42
; CHECK-NEXT: add w0, w8, #42
; CHECK-NEXT: ret
%c = icmp sgt i32 %x, -1
%e = zext i1 %c to i32
@@ -43,7 +43,7 @@ define <4 x i32> @add_zext_ifpos_vec_splat(<4 x i32> %x) {
define i32 @sel_ifpos_tval_bigger(i32 %x) {
; CHECK-LABEL: sel_ifpos_tval_bigger:
; CHECK: // %bb.0:
; CHECK-NEXT: cmp w0, #0 // =0
; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: mov w8, #41
; CHECK-NEXT: cinc w0, w8, ge
; CHECK-NEXT: ret
@@ -67,7 +67,7 @@ define i32 @add_sext_ifpos(i32 %x) {
; CHECK-LABEL: add_sext_ifpos:
; CHECK: // %bb.0:
; CHECK-NEXT: lsr w8, w0, #31
; CHECK-NEXT: add w0, w8, #41 // =41
; CHECK-NEXT: add w0, w8, #41
; CHECK-NEXT: ret
%c = icmp sgt i32 %x, -1
%e = sext i1 %c to i32
@@ -92,7 +92,7 @@ define <4 x i32> @add_sext_ifpos_vec_splat(<4 x i32> %x) {
define i32 @sel_ifpos_fval_bigger(i32 %x) {
; CHECK-LABEL: sel_ifpos_fval_bigger:
; CHECK: // %bb.0:
; CHECK-NEXT: cmp w0, #0 // =0
; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: mov w8, #41
; CHECK-NEXT: cinc w0, w8, lt
; CHECK-NEXT: ret
@@ -117,7 +117,7 @@ define i32 @add_zext_ifneg(i32 %x) {
; CHECK-LABEL: add_zext_ifneg:
; CHECK: // %bb.0:
; CHECK-NEXT: lsr w8, w0, #31
; CHECK-NEXT: add w0, w8, #41 // =41
; CHECK-NEXT: add w0, w8, #41
; CHECK-NEXT: ret
%c = icmp slt i32 %x, 0
%e = zext i1 %c to i32
@@ -128,7 +128,7 @@ define i32 @sel_ifneg_tval_bigger(i32 %x) {
define i32 @sel_ifneg_tval_bigger(i32 %x) {
; CHECK-LABEL: sel_ifneg_tval_bigger:
; CHECK: // %bb.0:
; CHECK-NEXT: cmp w0, #0 // =0
; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: mov w8, #41
; CHECK-NEXT: cinc w0, w8, lt
; CHECK-NEXT: ret
@@ -151,7 +151,7 @@ define i32 @add_sext_ifneg(i32 %x) {
; CHECK-LABEL: add_sext_ifneg:
; CHECK: // %bb.0:
; CHECK-NEXT: asr w8, w0, #31
; CHECK-NEXT: add w0, w8, #42 // =42
; CHECK-NEXT: add w0, w8, #42
; CHECK-NEXT: ret
%c = icmp slt i32 %x, 0
%e = sext i1 %c to i32
@@ -162,7 +162,7 @@ define i32 @sel_ifneg_fval_bigger(i32 %x) {
define i32 @sel_ifneg_fval_bigger(i32 %x) {
; CHECK-LABEL: sel_ifneg_fval_bigger:
; CHECK: // %bb.0:
; CHECK-NEXT: cmp w0, #0 // =0
; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: mov w8, #41
; CHECK-NEXT: cinc w0, w8, ge
; CHECK-NEXT: ret
@@ -175,7 +175,7 @@ define i32 @add_lshr_not(i32 %x) {
; CHECK-LABEL: add_lshr_not:
; CHECK: // %bb.0:
; CHECK-NEXT: asr w8, w0, #31
; CHECK-NEXT: add w0, w8, #42 // =42
; CHECK-NEXT: add w0, w8, #42
; CHECK-NEXT: ret
%not = xor i32 %x, -1
%sh = lshr i32 %not, 31
@@ -247,7 +247,7 @@ define i32 @sub_const_op_lshr(i32 %x) {
; CHECK-LABEL: sub_const_op_lshr:
; CHECK: // %bb.0:
; CHECK-NEXT: asr w8, w0, #31
; CHECK-NEXT: add w0, w8, #43 // =43
; CHECK-NEXT: add w0, w8, #43
; CHECK-NEXT: ret
%sh = lshr i32 %x, 31
%r = sub i32 43, %sh
26 changes: 13 additions & 13 deletions llvm/test/CodeGen/AArch64/signed-truncation-check.ll
@@ -273,7 +273,7 @@ define i1 @add_ultcmp_bad_i16_i8_add(i16 %x, i16 %y) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, w1
; CHECK-NEXT: and w8, w8, #0xffff
; CHECK-NEXT: cmp w8, #256 // =256
; CHECK-NEXT: cmp w8, #256
; CHECK-NEXT: cset w0, lo
; CHECK-NEXT: ret
%tmp0 = add i16 %x, %y
@@ -285,7 +285,7 @@ define i1 @add_ultcmp_bad_i16_i8_add(i16 %x, i16 %y) nounwind {
define i1 @add_ultcmp_bad_i16_i8_cmp(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: add_ultcmp_bad_i16_i8_cmp:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, #128 // =128
; CHECK-NEXT: add w8, w0, #128
; CHECK-NEXT: and w8, w8, #0xffff
; CHECK-NEXT: cmp w8, w1, uxth
; CHECK-NEXT: cset w0, lo
@@ -300,7 +300,7 @@ define i1 @add_ultcmp_bad_i8_i16(i16 %x) nounwind {
; CHECK-LABEL: add_ultcmp_bad_i8_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xffff
; CHECK-NEXT: add w8, w8, #128 // =128
; CHECK-NEXT: add w8, w8, #128
; CHECK-NEXT: lsr w0, w8, #16
; CHECK-NEXT: ret
%tmp0 = add i16 %x, 128 ; 1U << (8-1)
@@ -312,9 +312,9 @@ define i1 @add_ultcmp_bad_i8_i16(i16 %x) nounwind {
define i1 @add_ultcmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind {
; CHECK-LABEL: add_ultcmp_bad_i16_i8_c0notpoweroftwo:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, #192 // =192
; CHECK-NEXT: add w8, w0, #192
; CHECK-NEXT: and w8, w8, #0xffff
; CHECK-NEXT: cmp w8, #256 // =256
; CHECK-NEXT: cmp w8, #256
; CHECK-NEXT: cset w0, lo
; CHECK-NEXT: ret
%tmp0 = add i16 %x, 192 ; (1U << (8-1)) + (1U << (8-1-1))
@@ -326,9 +326,9 @@ define i1 @add_ultcmp_bad_i16_i8_c0notpoweroftwo(i16 %x) nounwind {
define i1 @add_ultcmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind {
; CHECK-LABEL: add_ultcmp_bad_i16_i8_c1notpoweroftwo:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, #128 // =128
; CHECK-NEXT: add w8, w0, #128
; CHECK-NEXT: and w8, w8, #0xffff
; CHECK-NEXT: cmp w8, #768 // =768
; CHECK-NEXT: cmp w8, #768
; CHECK-NEXT: cset w0, lo
; CHECK-NEXT: ret
%tmp0 = add i16 %x, 128 ; 1U << (8-1)
@@ -340,9 +340,9 @@ define i1 @add_ultcmp_bad_i16_i8_c1notpoweroftwo(i16 %x) nounwind {
define i1 @add_ultcmp_bad_i16_i8_magic(i16 %x) nounwind {
; CHECK-LABEL: add_ultcmp_bad_i16_i8_magic:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, #64 // =64
; CHECK-NEXT: add w8, w0, #64
; CHECK-NEXT: and w8, w8, #0xffff
; CHECK-NEXT: cmp w8, #256 // =256
; CHECK-NEXT: cmp w8, #256
; CHECK-NEXT: cset w0, lo
; CHECK-NEXT: ret
%tmp0 = add i16 %x, 64 ; 1U << (8-1-1)
@@ -354,9 +354,9 @@ define i1 @add_ultcmp_bad_i16_i8_magic(i16 %x) nounwind {
define i1 @add_ultcmp_bad_i16_i4(i16 %x) nounwind {
; CHECK-LABEL: add_ultcmp_bad_i16_i4:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, #8 // =8
; CHECK-NEXT: add w8, w0, #8
; CHECK-NEXT: and w8, w8, #0xffff
; CHECK-NEXT: cmp w8, #16 // =16
; CHECK-NEXT: cmp w8, #16
; CHECK-NEXT: cset w0, lo
; CHECK-NEXT: ret
%tmp0 = add i16 %x, 8 ; 1U << (4-1)
@@ -368,9 +368,9 @@ define i1 @add_ultcmp_bad_i16_i4(i16 %x) nounwind {
define i1 @add_ultcmp_bad_i24_i8(i24 %x) nounwind {
; CHECK-LABEL: add_ultcmp_bad_i24_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, #128 // =128
; CHECK-NEXT: add w8, w0, #128
; CHECK-NEXT: and w8, w8, #0xffffff
; CHECK-NEXT: cmp w8, #256 // =256
; CHECK-NEXT: cmp w8, #256
; CHECK-NEXT: cset w0, lo
; CHECK-NEXT: ret
%tmp0 = add i24 %x, 128 ; 1U << (8-1)
22 changes: 11 additions & 11 deletions llvm/test/CodeGen/AArch64/sink-addsub-of-const.ll
@@ -10,7 +10,7 @@ define i32 @sink_add_of_const_to_add0(i32 %a, i32 %b) {
; CHECK-LABEL: sink_add_of_const_to_add0:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, w1
; CHECK-NEXT: add w0, w8, #32 // =32
; CHECK-NEXT: add w0, w8, #32
; CHECK-NEXT: ret
%t0 = add i32 %a, 32 ; constant always on RHS
%r = add i32 %t0, %b
@@ -20,7 +20,7 @@ define i32 @sink_add_of_const_to_add1(i32 %a, i32 %b) {
; CHECK-LABEL: sink_add_of_const_to_add1:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, w1
; CHECK-NEXT: add w0, w8, #32 // =32
; CHECK-NEXT: add w0, w8, #32
; CHECK-NEXT: ret
%t0 = add i32 %a, 32 ; constant always on RHS
%r = add i32 %b, %t0
@@ -34,7 +34,7 @@ define i32 @sink_sub_of_const_to_add0(i32 %a, i32 %b) {
; CHECK-LABEL: sink_sub_of_const_to_add0:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, w1
; CHECK-NEXT: sub w0, w8, #32 // =32
; CHECK-NEXT: sub w0, w8, #32
; CHECK-NEXT: ret
%t0 = sub i32 %a, 32
%r = add i32 %t0, %b
@@ -44,7 +44,7 @@ define i32 @sink_sub_of_const_to_add1(i32 %a, i32 %b) {
; CHECK-LABEL: sink_sub_of_const_to_add1:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, w1
; CHECK-NEXT: sub w0, w8, #32 // =32
; CHECK-NEXT: sub w0, w8, #32
; CHECK-NEXT: ret
%t0 = sub i32 %a, 32
%r = add i32 %b, %t0
@@ -58,7 +58,7 @@ define i32 @sink_sub_from_const_to_add0(i32 %a, i32 %b) {
; CHECK-LABEL: sink_sub_from_const_to_add0:
; CHECK: // %bb.0:
; CHECK-NEXT: sub w8, w1, w0
; CHECK-NEXT: add w0, w8, #32 // =32
; CHECK-NEXT: add w0, w8, #32
; CHECK-NEXT: ret
%t0 = sub i32 32, %a
%r = add i32 %t0, %b
@@ -68,7 +68,7 @@ define i32 @sink_sub_from_const_to_add1(i32 %a, i32 %b) {
; CHECK-LABEL: sink_sub_from_const_to_add1:
; CHECK: // %bb.0:
; CHECK-NEXT: sub w8, w1, w0
; CHECK-NEXT: add w0, w8, #32 // =32
; CHECK-NEXT: add w0, w8, #32
; CHECK-NEXT: ret
%t0 = sub i32 32, %a
%r = add i32 %b, %t0
@@ -82,7 +82,7 @@ define i32 @sink_add_of_const_to_sub(i32 %a, i32 %b) {
; CHECK-LABEL: sink_add_of_const_to_sub:
; CHECK: // %bb.0:
; CHECK-NEXT: sub w8, w0, w1
; CHECK-NEXT: add w0, w8, #32 // =32
; CHECK-NEXT: add w0, w8, #32
; CHECK-NEXT: ret
%t0 = add i32 %a, 32 ; constant always on RHS
%r = sub i32 %t0, %b
@@ -92,7 +92,7 @@ define i32 @sink_add_of_const_to_sub2(i32 %a, i32 %b) {
; CHECK-LABEL: sink_add_of_const_to_sub2:
; CHECK: // %bb.0:
; CHECK-NEXT: sub w8, w1, w0
; CHECK-NEXT: sub w0, w8, #32 // =32
; CHECK-NEXT: sub w0, w8, #32
; CHECK-NEXT: ret
%t0 = add i32 %a, 32 ; constant always on RHS
%r = sub i32 %b, %t0
@@ -106,7 +106,7 @@ define i32 @sink_sub_of_const_to_sub(i32 %a, i32 %b) {
; CHECK-LABEL: sink_sub_of_const_to_sub:
; CHECK: // %bb.0:
; CHECK-NEXT: sub w8, w0, w1
; CHECK-NEXT: sub w0, w8, #32 // =32
; CHECK-NEXT: sub w0, w8, #32
; CHECK-NEXT: ret
%t0 = sub i32 %a, 32
%r = sub i32 %t0, %b
@@ -116,7 +116,7 @@ define i32 @sink_sub_of_const_to_sub2(i32 %a, i32 %b) {
; CHECK-LABEL: sink_sub_of_const_to_sub2:
; CHECK: // %bb.0:
; CHECK-NEXT: sub w8, w1, w0
; CHECK-NEXT: add w0, w8, #32 // =32
; CHECK-NEXT: add w0, w8, #32
; CHECK-NEXT: ret
%t0 = sub i32 %a, 32
%r = sub i32 %b, %t0
@@ -141,7 +141,7 @@ define i32 @sink_sub_from_const_to_sub2(i32 %a, i32 %b) {
; CHECK-LABEL: sink_sub_from_const_to_sub2:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, w1
; CHECK-NEXT: sub w0, w8, #32 // =32
; CHECK-NEXT: sub w0, w8, #32
; CHECK-NEXT: ret
%t0 = sub i32 32, %a
%r = sub i32 %b, %t0
16 changes: 8 additions & 8 deletions llvm/test/CodeGen/AArch64/split-vector-insert.ll
@@ -24,26 +24,26 @@ define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> %
; CHECK-NEXT: cntd x9
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
; CHECK-NEXT: str q1, [sp]
; CHECK-NEXT: sub x9, x9, #2 // =2
; CHECK-NEXT: sub x9, x9, #2
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp]
; CHECK-NEXT: mov w8, #2
; CHECK-NEXT: cmp x9, #2 // =2
; CHECK-NEXT: cmp x9, #2
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: addvl x10, sp, #1
; CHECK-NEXT: lsl x8, x8, #3
; CHECK-NEXT: st1d { z0.d }, p0, [sp, #1, mul vl]
; CHECK-NEXT: str q2, [x10, x8]
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp, #1, mul vl]
; CHECK-NEXT: mov w8, #4
; CHECK-NEXT: cmp x9, #4 // =4
; CHECK-NEXT: cmp x9, #4
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: addvl x10, sp, #2
; CHECK-NEXT: lsl x8, x8, #3
; CHECK-NEXT: st1d { z0.d }, p0, [sp, #2, mul vl]
; CHECK-NEXT: str q3, [x10, x8]
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp, #2, mul vl]
; CHECK-NEXT: mov w8, #6
; CHECK-NEXT: cmp x9, #6 // =6
; CHECK-NEXT: cmp x9, #6
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: addvl x10, sp, #3
; CHECK-NEXT: lsl x8, x8, #3
@@ -74,26 +74,26 @@ define <vscale x 2 x double> @test_nxv2f64_v8f64(<vscale x 2 x double> %a, <8 x
; CHECK-NEXT: cntd x9
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
; CHECK-NEXT: str q1, [sp]
; CHECK-NEXT: sub x9, x9, #2 // =2
; CHECK-NEXT: sub x9, x9, #2
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp]
; CHECK-NEXT: mov w8, #2
; CHECK-NEXT: cmp x9, #2 // =2
; CHECK-NEXT: cmp x9, #2
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: addvl x10, sp, #1
; CHECK-NEXT: lsl x8, x8, #3
; CHECK-NEXT: st1d { z0.d }, p0, [sp, #1, mul vl]
; CHECK-NEXT: str q2, [x10, x8]
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp, #1, mul vl]
; CHECK-NEXT: mov w8, #4
; CHECK-NEXT: cmp x9, #4 // =4
; CHECK-NEXT: cmp x9, #4
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: addvl x10, sp, #2
; CHECK-NEXT: lsl x8, x8, #3
; CHECK-NEXT: st1d { z0.d }, p0, [sp, #2, mul vl]
; CHECK-NEXT: str q3, [x10, x8]
; CHECK-NEXT: ld1d { z0.d }, p0/z, [sp, #2, mul vl]
; CHECK-NEXT: mov w8, #6
; CHECK-NEXT: cmp x9, #6 // =6
; CHECK-NEXT: cmp x9, #6
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: addvl x10, sp, #3
; CHECK-NEXT: lsl x8, x8, #3
6 changes: 3 additions & 3 deletions llvm/test/CodeGen/AArch64/srem-lkk.ll
@@ -95,8 +95,8 @@ define i32 @combine_srem_sdiv(i32 %x) {
define i32 @dont_fold_srem_power_of_two(i32 %x) {
; CHECK-LABEL: dont_fold_srem_power_of_two:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, #63 // =63
; CHECK-NEXT: cmp w0, #0 // =0
; CHECK-NEXT: add w8, w0, #63
; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: csel w8, w8, w0, lt
; CHECK-NEXT: and w8, w8, #0xffffffc0
; CHECK-NEXT: sub w0, w0, w8
@@ -121,7 +121,7 @@ define i32 @dont_fold_srem_i32_smax(i32 %x) {
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #2147483647
; CHECK-NEXT: add w8, w0, w8
; CHECK-NEXT: cmp w0, #0 // =0
; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: csel w8, w8, w0, lt
; CHECK-NEXT: and w8, w8, #0x80000000
; CHECK-NEXT: add w0, w0, w8
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/AArch64/srem-seteq-illegal-types.ll
@@ -32,7 +32,7 @@ define i1 @test_srem_even(i4 %X) nounwind {
; CHECK-NEXT: add w9, w9, w10
; CHECK-NEXT: mov w10, #6
; CHECK-NEXT: msub w8, w9, w10, w8
; CHECK-NEXT: cmp w8, #1 // =1
; CHECK-NEXT: cmp w8, #1
; CHECK-NEXT: cset w0, eq
; CHECK-NEXT: ret
%srem = srem i4 %X, 6
14 changes: 7 additions & 7 deletions llvm/test/CodeGen/AArch64/srem-seteq.ll
@@ -50,7 +50,7 @@ define i32 @test_srem_odd_bit30(i32 %X) nounwind {
; CHECK-NEXT: movk w8, #27306, lsl #16
; CHECK-NEXT: orr w9, wzr, #0x1
; CHECK-NEXT: madd w8, w0, w8, w9
; CHECK-NEXT: cmp w8, #3 // =3
; CHECK-NEXT: cmp w8, #3
; CHECK-NEXT: cset w0, lo
; CHECK-NEXT: ret
%srem = srem i32 %X, 1073741827
@@ -67,7 +67,7 @@ define i32 @test_srem_odd_bit31(i32 %X) nounwind {
; CHECK-NEXT: movk w8, #54613, lsl #16
; CHECK-NEXT: orr w9, wzr, #0x1
; CHECK-NEXT: madd w8, w0, w8, w9
; CHECK-NEXT: cmp w8, #3 // =3
; CHECK-NEXT: cmp w8, #3
; CHECK-NEXT: cset w0, lo
; CHECK-NEXT: ret
%srem = srem i32 %X, 2147483651
@@ -126,7 +126,7 @@ define i32 @test_srem_even_bit30(i32 %X) nounwind {
; CHECK-NEXT: orr w9, wzr, #0x8
; CHECK-NEXT: madd w8, w0, w8, w9
; CHECK-NEXT: ror w8, w8, #3
; CHECK-NEXT: cmp w8, #3 // =3
; CHECK-NEXT: cmp w8, #3
; CHECK-NEXT: cset w0, lo
; CHECK-NEXT: ret
%srem = srem i32 %X, 1073741928
@@ -144,7 +144,7 @@ define i32 @test_srem_even_bit31(i32 %X) nounwind {
; CHECK-NEXT: orr w9, wzr, #0x2
; CHECK-NEXT: madd w8, w0, w8, w9
; CHECK-NEXT: ror w8, w8, #1
; CHECK-NEXT: cmp w8, #3 // =3
; CHECK-NEXT: cmp w8, #3
; CHECK-NEXT: cset w0, lo
; CHECK-NEXT: ret
%srem = srem i32 %X, 2147483750
@@ -234,8 +234,8 @@ define i32 @test_srem_one(i32 %X) nounwind {
define i32 @test_srem_pow2(i32 %X) nounwind {
; CHECK-LABEL: test_srem_pow2:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w0, #15 // =15
; CHECK-NEXT: cmp w0, #0 // =0
; CHECK-NEXT: add w8, w0, #15
; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: csel w8, w8, w0, lt
; CHECK-NEXT: and w8, w8, #0xfffffff0
; CHECK-NEXT: cmp w0, w8
@@ -253,7 +253,7 @@ define i32 @test_srem_int_min(i32 %X) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: mov w8, #2147483647
; CHECK-NEXT: add w8, w0, w8
; CHECK-NEXT: cmp w0, #0 // =0
; CHECK-NEXT: cmp w0, #0
; CHECK-NEXT: csel w8, w8, w0, lt
; CHECK-NEXT: and w8, w8, #0x80000000
; CHECK-NEXT: cmn w0, w8
14 changes: 7 additions & 7 deletions llvm/test/CodeGen/AArch64/srem-vector-lkk.ll
@@ -157,18 +157,18 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) {
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: smov w8, v0.h[1]
; CHECK-NEXT: add w12, w8, #31 // =31
; CHECK-NEXT: cmp w8, #0 // =0
; CHECK-NEXT: add w12, w8, #31
; CHECK-NEXT: cmp w8, #0
; CHECK-NEXT: mov w11, #37253
; CHECK-NEXT: csel w12, w12, w8, lt
; CHECK-NEXT: smov w9, v0.h[0]
; CHECK-NEXT: smov w10, v0.h[3]
; CHECK-NEXT: movk w11, #44150, lsl #16
; CHECK-NEXT: and w12, w12, #0xffffffe0
; CHECK-NEXT: sub w8, w8, w12
; CHECK-NEXT: add w12, w9, #63 // =63
; CHECK-NEXT: add w12, w9, #63
; CHECK-NEXT: smull x11, w10, w11
; CHECK-NEXT: cmp w9, #0 // =0
; CHECK-NEXT: cmp w9, #0
; CHECK-NEXT: lsr x11, x11, #32
; CHECK-NEXT: csel w12, w12, w9, lt
; CHECK-NEXT: add w11, w11, w10
@@ -178,8 +178,8 @@ define <4 x i16> @dont_fold_srem_power_of_two(<4 x i16> %x) {
; CHECK-NEXT: add w11, w12, w11, lsr #31
; CHECK-NEXT: smov w12, v0.h[2]
; CHECK-NEXT: fmov s0, w9
; CHECK-NEXT: add w9, w12, #7 // =7
; CHECK-NEXT: cmp w12, #0 // =0
; CHECK-NEXT: add w9, w12, #7
; CHECK-NEXT: cmp w12, #0
; CHECK-NEXT: csel w9, w9, w12, lt
; CHECK-NEXT: and w9, w9, #0xfffffff8
; CHECK-NEXT: sub w9, w12, w9
@@ -263,7 +263,7 @@ define <4 x i16> @dont_fold_srem_i16_smax(<4 x i16> %x) {
; CHECK-NEXT: add w10, w10, w11
; CHECK-NEXT: mov w11, #32767
; CHECK-NEXT: add w11, w8, w11
; CHECK-NEXT: cmp w8, #0 // =0
; CHECK-NEXT: cmp w8, #0
; CHECK-NEXT: csel w11, w11, w8, lt
; CHECK-NEXT: and w11, w11, #0xffff8000
; CHECK-NEXT: sub w8, w8, w11
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/AArch64/ssub_sat.ll
@@ -13,7 +13,7 @@ define i32 @func(i32 %x, i32 %y) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: subs w8, w0, w1
; CHECK-NEXT: mov w9, #2147483647
; CHECK-NEXT: cmp w8, #0 // =0
; CHECK-NEXT: cmp w8, #0
; CHECK-NEXT: cinv w8, w9, ge
; CHECK-NEXT: subs w9, w0, w1
; CHECK-NEXT: csel w0, w8, w9, vs
@@ -27,7 +27,7 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: subs x8, x0, x1
; CHECK-NEXT: mov x9, #9223372036854775807
; CHECK-NEXT: cmp x8, #0 // =0
; CHECK-NEXT: cmp x8, #0
; CHECK-NEXT: cinv x8, x9, ge
; CHECK-NEXT: subs x9, x0, x1
; CHECK-NEXT: csel x0, x8, x9, vs
@@ -58,9 +58,9 @@ define i8 @func8(i8 %x, i8 %y) nounwind {
; CHECK-NEXT: sxtb w8, w0
; CHECK-NEXT: sub w8, w8, w1, sxtb
; CHECK-NEXT: mov w9, #127
; CHECK-NEXT: cmp w8, #127 // =127
; CHECK-NEXT: cmp w8, #127
; CHECK-NEXT: csel w8, w8, w9, lt
; CHECK-NEXT: cmn w8, #128 // =128
; CHECK-NEXT: cmn w8, #128
; CHECK-NEXT: mov w9, #-128
; CHECK-NEXT: csel w0, w8, w9, gt
; CHECK-NEXT: ret
@@ -75,9 +75,9 @@ define i4 @func3(i4 %x, i4 %y) nounwind {
; CHECK-NEXT: sbfx w9, w0, #0, #4
; CHECK-NEXT: sub w8, w9, w8, asr #28
; CHECK-NEXT: mov w10, #7
; CHECK-NEXT: cmp w8, #7 // =7
; CHECK-NEXT: cmp w8, #7
; CHECK-NEXT: csel w8, w8, w10, lt
; CHECK-NEXT: cmn w8, #8 // =8
; CHECK-NEXT: cmn w8, #8
; CHECK-NEXT: mov w9, #-8
; CHECK-NEXT: csel w0, w8, w9, gt
; CHECK-NEXT: ret
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/AArch64/ssub_sat_plus.ll
@@ -13,7 +13,7 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
; CHECK-NEXT: mul w8, w1, w2
; CHECK-NEXT: subs w10, w0, w8
; CHECK-NEXT: mov w9, #2147483647
; CHECK-NEXT: cmp w10, #0 // =0
; CHECK-NEXT: cmp w10, #0
; CHECK-NEXT: cinv w9, w9, ge
; CHECK-NEXT: subs w8, w0, w8
; CHECK-NEXT: csel w0, w9, w8, vs
@@ -28,7 +28,7 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: subs x8, x0, x2
; CHECK-NEXT: mov x9, #9223372036854775807
; CHECK-NEXT: cmp x8, #0 // =0
; CHECK-NEXT: cmp x8, #0
; CHECK-NEXT: cinv x8, x9, ge
; CHECK-NEXT: subs x9, x0, x2
; CHECK-NEXT: csel x0, x8, x9, vs
@@ -63,9 +63,9 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
; CHECK-NEXT: mul w9, w1, w2
; CHECK-NEXT: sub w8, w8, w9, sxtb
; CHECK-NEXT: mov w10, #127
; CHECK-NEXT: cmp w8, #127 // =127
; CHECK-NEXT: cmp w8, #127
; CHECK-NEXT: csel w8, w8, w10, lt
; CHECK-NEXT: cmn w8, #128 // =128
; CHECK-NEXT: cmn w8, #128
; CHECK-NEXT: mov w9, #-128
; CHECK-NEXT: csel w0, w8, w9, gt
; CHECK-NEXT: ret
@@ -82,9 +82,9 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
; CHECK-NEXT: lsl w9, w9, #28
; CHECK-NEXT: sub w8, w8, w9, asr #28
; CHECK-NEXT: mov w10, #7
; CHECK-NEXT: cmp w8, #7 // =7
; CHECK-NEXT: cmp w8, #7
; CHECK-NEXT: csel w8, w8, w10, lt
; CHECK-NEXT: cmn w8, #8 // =8
; CHECK-NEXT: cmn w8, #8
; CHECK-NEXT: mov w9, #-8
; CHECK-NEXT: csel w0, w8, w9, gt
; CHECK-NEXT: ret
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/AArch64/ssub_sat_vec.ll
@@ -136,8 +136,8 @@ define void @v2i8(<2 x i8>* %px, <2 x i8>* %py, <2 x i8>* %pz) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ld1 { v0.b }[0], [x1]
; CHECK-NEXT: ld1 { v1.b }[0], [x0]
; CHECK-NEXT: add x8, x0, #1 // =1
; CHECK-NEXT: add x9, x1, #1 // =1
; CHECK-NEXT: add x8, x0, #1
; CHECK-NEXT: add x9, x1, #1
; CHECK-NEXT: ld1 { v0.b }[4], [x9]
; CHECK-NEXT: ld1 { v1.b }[4], [x8]
; CHECK-NEXT: shl v0.2s, v0.2s, #24
@@ -176,8 +176,8 @@ define void @v2i16(<2 x i16>* %px, <2 x i16>* %py, <2 x i16>* %pz) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: ld1 { v0.h }[0], [x1]
; CHECK-NEXT: ld1 { v1.h }[0], [x0]
; CHECK-NEXT: add x8, x0, #2 // =2
; CHECK-NEXT: add x9, x1, #2 // =2
; CHECK-NEXT: add x8, x0, #2
; CHECK-NEXT: add x9, x1, #2
; CHECK-NEXT: ld1 { v0.h }[2], [x9]
; CHECK-NEXT: ld1 { v1.h }[2], [x8]
; CHECK-NEXT: shl v0.2s, v0.2s, #16
@@ -357,7 +357,7 @@ define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
; CHECK-NEXT: sbcs x12, x3, x7
; CHECK-NEXT: mov x9, #9223372036854775807
; CHECK-NEXT: eor x10, x3, x7
; CHECK-NEXT: cmp x12, #0 // =0
; CHECK-NEXT: cmp x12, #0
; CHECK-NEXT: eor x13, x3, x12
; CHECK-NEXT: cinv x14, x9, ge
; CHECK-NEXT: tst x10, x13
@@ -367,7 +367,7 @@ define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
; CHECK-NEXT: subs x8, x0, x4
; CHECK-NEXT: sbcs x10, x1, x5
; CHECK-NEXT: eor x11, x1, x5
; CHECK-NEXT: cmp x10, #0 // =0
; CHECK-NEXT: cmp x10, #0
; CHECK-NEXT: eor x12, x1, x10
; CHECK-NEXT: cinv x9, x9, ge
; CHECK-NEXT: tst x11, x12
6 changes: 3 additions & 3 deletions llvm/test/CodeGen/AArch64/stack-guard-remat-bitcast.ll
@@ -8,9 +8,9 @@
define i32 @test_stack_guard_remat2() ssp {
; CHECK-LABEL: test_stack_guard_remat2:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: sub sp, sp, #64 ; =64
; CHECK-NEXT: sub sp, sp, #64
; CHECK-NEXT: stp x29, x30, [sp, #48] ; 16-byte Folded Spill
; CHECK-NEXT: add x29, sp, #48 ; =48
; CHECK-NEXT: add x29, sp, #48
; CHECK-NEXT: .cfi_def_cfa w29, 16
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
@@ -41,7 +41,7 @@ define i32 @test_stack_guard_remat2() ssp {
; CHECK-NEXT: ; %bb.1: ; %entry
; CHECK-NEXT: ldp x29, x30, [sp, #48] ; 16-byte Folded Reload
; CHECK-NEXT: mov w0, #-1
; CHECK-NEXT: add sp, sp, #64 ; =64
; CHECK-NEXT: add sp, sp, #64
; CHECK-NEXT: ret
; CHECK-NEXT: LBB0_2: ; %entry
; CHECK-NEXT: bl ___stack_chk_fail
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll
@@ -43,7 +43,7 @@ define dso_local void @foo(i64 %t) local_unnamed_addr #0 {
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
; CHECK-NEXT: mov x29, sp
; CHECK-NEXT: sub sp, sp, #16 // =16
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: .cfi_def_cfa w29, 16
; CHECK-NEXT: .cfi_offset w30, -8
; CHECK-NEXT: .cfi_offset w29, -16
@@ -58,7 +58,7 @@ define dso_local void @foo(i64 %t) local_unnamed_addr #0 {
; CHECK-MINUS-257-OFFSET: sub x8, x8, #257
; CHECK-MINUS-257-OFFSET-NEXT: ldr x8, [x8]
; CHECK-NEXT: lsl x9, x0, #2
; CHECK-NEXT: add x9, x9, #15 // =15
; CHECK-NEXT: add x9, x9, #15
; CHECK-NEXT: and x9, x9, #0xfffffffffffffff0
; CHECK-NEXT: stur x8, [x29, #-8]
; CHECK-NEXT: mov x8, sp
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/AArch64/statepoint-call-lowering.ll
@@ -81,13 +81,13 @@ entry:
define i1 @test_relocate(i32 addrspace(1)* %a) gc "statepoint-example" {
; CHECK-LABEL: test_relocate:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: sub sp, sp, #16 // =16
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: str x0, [sp, #8]
; CHECK-NEXT: bl return_i1
; CHECK-NEXT: .Ltmp7:
; CHECK-NEXT: and w0, w0, #0x1
; CHECK-NEXT: add sp, sp, #16 // =16
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
; Check that an unused relocate has no code-generation impact
entry:
@@ -176,7 +176,7 @@ declare void @consume_attributes(i32, i8* nest, i32, %struct2* byval(%struct2))
define void @test_attributes(%struct2* byval(%struct2) %s) gc "statepoint-example" {
; CHECK-LABEL: test_attributes:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: sub sp, sp, #32 // =32
; CHECK-NEXT: sub sp, sp, #32
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: ldr x8, [sp, #48]
; CHECK-NEXT: ldr q0, [sp, #32]
Expand All @@ -187,7 +187,7 @@ define void @test_attributes(%struct2* byval(%struct2) %s) gc "statepoint-exampl
; CHECK-NEXT: str q0, [sp]
; CHECK-NEXT: bl consume_attributes
; CHECK-NEXT: .Ltmp11:
; CHECK-NEXT: add sp, sp, #32 // =32
; CHECK-NEXT: add sp, sp, #32
; CHECK-NEXT: ret
entry:
; Check that arguments with attributes are lowered correctly.
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/AArch64/sub-of-not.ll
@@ -10,7 +10,7 @@ define i8 @scalar_i8(i8 %x, i8 %y) nounwind {
; CHECK-LABEL: scalar_i8:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w1, w0
; CHECK-NEXT: add w0, w8, #1 // =1
; CHECK-NEXT: add w0, w8, #1
; CHECK-NEXT: ret
%t0 = xor i8 %x, -1
%t1 = sub i8 %y, %t0
@@ -21,7 +21,7 @@ define i16 @scalar_i16(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: scalar_i16:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w1, w0
; CHECK-NEXT: add w0, w8, #1 // =1
; CHECK-NEXT: add w0, w8, #1
; CHECK-NEXT: ret
%t0 = xor i16 %x, -1
%t1 = sub i16 %y, %t0
@@ -32,7 +32,7 @@ define i32 @scalar_i32(i32 %x, i32 %y) nounwind {
; CHECK-LABEL: scalar_i32:
; CHECK: // %bb.0:
; CHECK-NEXT: add w8, w1, w0
; CHECK-NEXT: add w0, w8, #1 // =1
; CHECK-NEXT: add w0, w8, #1
; CHECK-NEXT: ret
%t0 = xor i32 %x, -1
%t1 = sub i32 %y, %t0
@@ -43,7 +43,7 @@ define i64 @scalar_i64(i64 %x, i64 %y) nounwind {
; CHECK-LABEL: scalar_i64:
; CHECK: // %bb.0:
; CHECK-NEXT: add x8, x1, x0
; CHECK-NEXT: add x0, x8, #1 // =1
; CHECK-NEXT: add x0, x8, #1
; CHECK-NEXT: ret
%t0 = xor i64 %x, -1
%t1 = sub i64 %y, %t0
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/AArch64/sub1.ll
@@ -4,7 +4,7 @@
define i64 @sub1_disguised_constant(i64 %x) {
; CHECK-LABEL: sub1_disguised_constant:
; CHECK: // %bb.0:
; CHECK-NEXT: sub w8, w0, #1 // =1
; CHECK-NEXT: sub w8, w0, #1
; CHECK-NEXT: and w8, w0, w8
; CHECK-NEXT: and x0, x8, #0xffff
; CHECK-NEXT: ret
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll
@@ -42,13 +42,13 @@ define float @foo2(double* %x0, double* %x1) nounwind {
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-4
; CHECK-NEXT: sub sp, sp, #16 // =16
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: ld4d { z1.d, z2.d, z3.d, z4.d }, p0/z, [x0]
; CHECK-NEXT: ld4d { z16.d, z17.d, z18.d, z19.d }, p0/z, [x1]
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: add x8, sp, #16 // =16
; CHECK-NEXT: add x9, sp, #16 // =16
; CHECK-NEXT: add x8, sp, #16
; CHECK-NEXT: add x9, sp, #16
; CHECK-NEXT: fmov s0, #1.00000000
; CHECK-NEXT: mov w1, #1
; CHECK-NEXT: mov w2, #2
@@ -65,7 +65,7 @@ define float @foo2(double* %x0, double* %x1) nounwind {
; CHECK-NEXT: str x8, [sp]
; CHECK-NEXT: bl callee2
; CHECK-NEXT: addvl sp, sp, #4
; CHECK-NEXT: add sp, sp, #16 // =16
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
; CHECK-NEXT: ret
entry:
24 changes: 12 additions & 12 deletions llvm/test/CodeGen/AArch64/sve-extract-vector.ll
@@ -18,9 +18,9 @@ define <2 x i64> @extract_v2i64_nxv2i64_idx2(<vscale x 2 x i64> %vec) nounwind {
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: cntd x9
; CHECK-NEXT: sub x9, x9, #2 // =2
; CHECK-NEXT: sub x9, x9, #2
; CHECK-NEXT: mov w8, #2
; CHECK-NEXT: cmp x9, #2 // =2
; CHECK-NEXT: cmp x9, #2
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
@@ -51,9 +51,9 @@ define <4 x i32> @extract_v4i32_nxv4i32_idx4(<vscale x 4 x i32> %vec) nounwind {
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: cntw x9
; CHECK-NEXT: sub x9, x9, #4 // =4
; CHECK-NEXT: sub x9, x9, #4
; CHECK-NEXT: mov w8, #4
; CHECK-NEXT: cmp x9, #4 // =4
; CHECK-NEXT: cmp x9, #4
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: st1w { z0.s }, p0, [sp]
@@ -84,9 +84,9 @@ define <8 x i16> @extract_v8i16_nxv8i16_idx8(<vscale x 8 x i16> %vec) nounwind {
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: cnth x9
; CHECK-NEXT: sub x9, x9, #8 // =8
; CHECK-NEXT: sub x9, x9, #8
; CHECK-NEXT: mov w8, #8
; CHECK-NEXT: cmp x9, #8 // =8
; CHECK-NEXT: cmp x9, #8
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: st1h { z0.h }, p0, [sp]
@@ -117,10 +117,10 @@ define <16 x i8> @extract_v16i8_nxv16i8_idx16(<vscale x 16 x i8> %vec) nounwind
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: rdvl x9, #1
; CHECK-NEXT: sub x9, x9, #16 // =16
; CHECK-NEXT: sub x9, x9, #16
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: mov w8, #16
; CHECK-NEXT: cmp x9, #16 // =16
; CHECK-NEXT: cmp x9, #16
; CHECK-NEXT: st1b { z0.b }, p0, [sp]
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: mov x9, sp
@@ -159,9 +159,9 @@ define <2 x i64> @extract_fixed_v2i64_nxv2i64(<vscale x 2 x i64> %vec) nounwind
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: cntd x9
; CHECK-NEXT: sub x9, x9, #2 // =2
; CHECK-NEXT: sub x9, x9, #2
; CHECK-NEXT: mov w8, #2
; CHECK-NEXT: cmp x9, #2 // =2
; CHECK-NEXT: cmp x9, #2
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
@@ -181,11 +181,11 @@ define <4 x i64> @extract_fixed_v4i64_nxv2i64(<vscale x 2 x i64> %vec) nounwind
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: cntd x9
; CHECK-NEXT: subs x9, x9, #4 // =4
; CHECK-NEXT: subs x9, x9, #4
; CHECK-NEXT: csel x9, xzr, x9, lo
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov w10, #4
; CHECK-NEXT: cmp x9, #4 // =4
; CHECK-NEXT: cmp x9, #4
; CHECK-NEXT: ptrue p1.d, vl4
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
; CHECK-NEXT: csel x9, x9, x10, lo
2 changes: 1 addition & 1 deletion llvm/test/CodeGen/AArch64/sve-insert-element.ll
@@ -506,7 +506,7 @@ define <vscale x 32 x i1> @test_predicate_insert_32xi1(<vscale x 32 x i1> %val,
; CHECK-NEXT: rdvl x10, #2
; CHECK-NEXT: // kill: def $w1 killed $w1 def $x1
; CHECK-NEXT: sxtw x9, w1
; CHECK-NEXT: sub x10, x10, #1 // =1
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: cmp x9, x10
; CHECK-NEXT: mov z0.b, p1/z, #1 // =0x1
; CHECK-NEXT: ptrue p1.b
24 changes: 12 additions & 12 deletions llvm/test/CodeGen/AArch64/sve-insert-vector.ll
@@ -23,9 +23,9 @@ define <vscale x 2 x i64> @insert_v2i64_nxv2i64_idx2(<vscale x 2 x i64> %vec, <2
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: cntd x9
; CHECK-NEXT: sub x9, x9, #2 // =2
; CHECK-NEXT: sub x9, x9, #2
; CHECK-NEXT: mov w8, #2
; CHECK-NEXT: cmp x9, #2 // =2
; CHECK-NEXT: cmp x9, #2
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: lsl x8, x8, #3
@@ -62,9 +62,9 @@ define <vscale x 4 x i32> @insert_v4i32_nxv4i32_idx4(<vscale x 4 x i32> %vec, <4
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: cntw x9
; CHECK-NEXT: sub x9, x9, #4 // =4
; CHECK-NEXT: sub x9, x9, #4
; CHECK-NEXT: mov w8, #4
; CHECK-NEXT: cmp x9, #4 // =4
; CHECK-NEXT: cmp x9, #4
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: lsl x8, x8, #2
@@ -101,9 +101,9 @@ define <vscale x 8 x i16> @insert_v8i16_nxv8i16_idx8(<vscale x 8 x i16> %vec, <8
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: cnth x9
; CHECK-NEXT: sub x9, x9, #8 // =8
; CHECK-NEXT: sub x9, x9, #8
; CHECK-NEXT: mov w8, #8
; CHECK-NEXT: cmp x9, #8 // =8
; CHECK-NEXT: cmp x9, #8
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: lsl x8, x8, #1
@@ -140,9 +140,9 @@ define <vscale x 16 x i8> @insert_v16i8_nxv16i8_idx16(<vscale x 16 x i8> %vec, <
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: rdvl x9, #1
; CHECK-NEXT: sub x9, x9, #16 // =16
; CHECK-NEXT: sub x9, x9, #16
; CHECK-NEXT: mov w8, #16
; CHECK-NEXT: cmp x9, #16 // =16
; CHECK-NEXT: cmp x9, #16
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: mov x9, sp
@@ -307,9 +307,9 @@ define <vscale x 2 x i64> @insert_fixed_v2i64_nxv2i64(<vscale x 2 x i64> %vec, <
; CHECK-NEXT: str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT: addvl sp, sp, #-1
; CHECK-NEXT: cntd x9
; CHECK-NEXT: sub x9, x9, #2 // =2
; CHECK-NEXT: sub x9, x9, #2
; CHECK-NEXT: mov w8, #2
; CHECK-NEXT: cmp x9, #2 // =2
; CHECK-NEXT: cmp x9, #2
; CHECK-NEXT: csel x8, x9, x8, lo
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: lsl x8, x8, #3
@@ -332,10 +332,10 @@ define <vscale x 2 x i64> @insert_fixed_v4i64_nxv2i64(<vscale x 2 x i64> %vec, <
; CHECK-NEXT: ptrue p0.d, vl4
; CHECK-NEXT: cntd x8
; CHECK-NEXT: ld1d { z1.d }, p0/z, [x0]
; CHECK-NEXT: subs x8, x8, #4 // =4
; CHECK-NEXT: subs x8, x8, #4
; CHECK-NEXT: csel x8, xzr, x8, lo
; CHECK-NEXT: mov w9, #4
; CHECK-NEXT: cmp x8, #4 // =4
; CHECK-NEXT: cmp x8, #4
; CHECK-NEXT: ptrue p1.d
; CHECK-NEXT: csel x8, x8, x9, lo
; CHECK-NEXT: mov x9, sp
44 changes: 22 additions & 22 deletions llvm/test/CodeGen/AArch64/sve-ld1r.ll
@@ -17,15 +17,15 @@
define <vscale x 16 x i8> @ld1r_stack() {
; CHECK-LABEL: ld1r_stack:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #16 // =16
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: adrp x8, :got:g8
; CHECK-NEXT: ldr x8, [x8, :got_lo12:g8]
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: ldrb w8, [x8]
; CHECK-NEXT: strb w8, [sp, #12]
; CHECK-NEXT: ld1rb { z0.b }, p0/z, [sp, #14]
; CHECK-NEXT: add sp, sp, #16 // =16
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
%valp = alloca i8
%valp2 = load volatile i8, i8* @g8
@@ -65,7 +65,7 @@ define <vscale x 16 x i8> @ld1rb_gep(i8* %valp) {
define <vscale x 16 x i8> @ld1rb_gep_out_of_range_up(i8* %valp) {
; CHECK-LABEL: ld1rb_gep_out_of_range_up:
; CHECK: // %bb.0:
; CHECK-NEXT: add x8, x0, #64 // =64
; CHECK-NEXT: add x8, x0, #64
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: ld1rb { z0.b }, p0/z, [x8]
; CHECK-NEXT: ret
@@ -79,7 +79,7 @@ define <vscale x 16 x i8> @ld1rb_gep_out_of_range_up(i8* %valp) {
define <vscale x 16 x i8> @ld1rb_gep_out_of_range_down(i8* %valp) {
; CHECK-LABEL: ld1rb_gep_out_of_range_down:
; CHECK: // %bb.0:
; CHECK-NEXT: sub x8, x0, #1 // =1
; CHECK-NEXT: sub x8, x0, #1
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: ld1rb { z0.b }, p0/z, [x8]
; CHECK-NEXT: ret
@@ -196,7 +196,7 @@ define <vscale x 8 x i16> @ld1rh_gep(i16* %valp) {
define <vscale x 8 x i16> @ld1rh_gep_out_of_range_up(i16* %valp) {
; CHECK-LABEL: ld1rh_gep_out_of_range_up:
; CHECK: // %bb.0:
; CHECK-NEXT: add x8, x0, #128 // =128
; CHECK-NEXT: add x8, x0, #128
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: ld1rh { z0.h }, p0/z, [x8]
; CHECK-NEXT: ret
@@ -210,7 +210,7 @@ define <vscale x 8 x i16> @ld1rh_gep_out_of_range_up(i16* %valp) {
define <vscale x 8 x i16> @ld1rh_gep_out_of_range_down(i16* %valp) {
; CHECK-LABEL: ld1rh_gep_out_of_range_down:
; CHECK: // %bb.0:
; CHECK-NEXT: sub x8, x0, #2 // =2
; CHECK-NEXT: sub x8, x0, #2
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: ld1rh { z0.h }, p0/z, [x8]
; CHECK-NEXT: ret
@@ -301,7 +301,7 @@ define <vscale x 4 x i32> @ld1rw_gep(i32* %valp) {
define <vscale x 4 x i32> @ld1rw_gep_out_of_range_up(i32* %valp) {
; CHECK-LABEL: ld1rw_gep_out_of_range_up:
; CHECK: // %bb.0:
; CHECK-NEXT: add x8, x0, #256 // =256
; CHECK-NEXT: add x8, x0, #256
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: ld1rw { z0.s }, p0/z, [x8]
; CHECK-NEXT: ret
@@ -315,7 +315,7 @@ define <vscale x 4 x i32> @ld1rw_gep_out_of_range_up(i32* %valp) {
define <vscale x 4 x i32> @ld1rw_gep_out_of_range_down(i32* %valp) {
; CHECK-LABEL: ld1rw_gep_out_of_range_down:
; CHECK: // %bb.0:
; CHECK-NEXT: sub x8, x0, #4 // =4
; CHECK-NEXT: sub x8, x0, #4
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: ld1rw { z0.s }, p0/z, [x8]
; CHECK-NEXT: ret
@@ -380,7 +380,7 @@ define <vscale x 2 x i64> @ld1rd_gep(i64* %valp) {
define <vscale x 2 x i64> @ld1rd_gep_out_of_range_up(i64* %valp) {
; CHECK-LABEL: ld1rd_gep_out_of_range_up:
; CHECK: // %bb.0:
; CHECK-NEXT: add x8, x0, #512 // =512
; CHECK-NEXT: add x8, x0, #512
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1rd { z0.d }, p0/z, [x8]
; CHECK-NEXT: ret
@@ -394,7 +394,7 @@ define <vscale x 2 x i64> @ld1rd_gep_out_of_range_up(i64* %valp) {
define <vscale x 2 x i64> @ld1rd_gep_out_of_range_down(i64* %valp) {
; CHECK-LABEL: ld1rd_gep_out_of_range_down:
; CHECK: // %bb.0:
; CHECK-NEXT: sub x8, x0, #8 // =8
; CHECK-NEXT: sub x8, x0, #8
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1rd { z0.d }, p0/z, [x8]
; CHECK-NEXT: ret
@@ -433,7 +433,7 @@ define <vscale x 8 x half> @ld1rh_half_gep(half* %valp) {
define <vscale x 8 x half> @ld1rh_half_gep_out_of_range_up(half* %valp) {
; CHECK-LABEL: ld1rh_half_gep_out_of_range_up:
; CHECK: // %bb.0:
; CHECK-NEXT: add x8, x0, #128 // =128
; CHECK-NEXT: add x8, x0, #128
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: ld1rh { z0.h }, p0/z, [x8]
; CHECK-NEXT: ret
@@ -447,7 +447,7 @@ define <vscale x 8 x half> @ld1rh_half_gep_out_of_range_up(half* %valp) {
define <vscale x 8 x half> @ld1rh_half_gep_out_of_range_down(half* %valp) {
; CHECK-LABEL: ld1rh_half_gep_out_of_range_down:
; CHECK: // %bb.0:
; CHECK-NEXT: sub x8, x0, #2 // =2
; CHECK-NEXT: sub x8, x0, #2
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: ld1rh { z0.h }, p0/z, [x8]
; CHECK-NEXT: ret
@@ -486,7 +486,7 @@ define <vscale x 4 x half> @ld1rh_half_unpacked4_gep(half* %valp) {
define <vscale x 4 x half> @ld1rh_half_unpacked4_gep_out_of_range_up(half* %valp) {
; CHECK-LABEL: ld1rh_half_unpacked4_gep_out_of_range_up:
; CHECK: // %bb.0:
; CHECK-NEXT: add x8, x0, #128 // =128
; CHECK-NEXT: add x8, x0, #128
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: ld1rh { z0.s }, p0/z, [x8]
; CHECK-NEXT: ret
@@ -500,7 +500,7 @@ define <vscale x 4 x half> @ld1rh_half_unpacked4_gep_out_of_range_up(half* %valp
define <vscale x 4 x half> @ld1rh_half_unpacked4_gep_out_of_range_down(half* %valp) {
; CHECK-LABEL: ld1rh_half_unpacked4_gep_out_of_range_down:
; CHECK: // %bb.0:
; CHECK-NEXT: sub x8, x0, #2 // =2
; CHECK-NEXT: sub x8, x0, #2
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: ld1rh { z0.s }, p0/z, [x8]
; CHECK-NEXT: ret
@@ -539,7 +539,7 @@ define <vscale x 2 x half> @ld1rh_half_unpacked2_gep(half* %valp) {
define <vscale x 2 x half> @ld1rh_half_unpacked2_gep_out_of_range_up(half* %valp) {
; CHECK-LABEL: ld1rh_half_unpacked2_gep_out_of_range_up:
; CHECK: // %bb.0:
; CHECK-NEXT: add x8, x0, #128 // =128
; CHECK-NEXT: add x8, x0, #128
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1rh { z0.d }, p0/z, [x8]
; CHECK-NEXT: ret
@@ -553,7 +553,7 @@ define <vscale x 2 x half> @ld1rh_half_unpacked2_gep_out_of_range_up(half* %valp
define <vscale x 2 x half> @ld1rh_half_unpacked2_gep_out_of_range_down(half* %valp) {
; CHECK-LABEL: ld1rh_half_unpacked2_gep_out_of_range_down:
; CHECK: // %bb.0:
; CHECK-NEXT: sub x8, x0, #2 // =2
; CHECK-NEXT: sub x8, x0, #2
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1rh { z0.d }, p0/z, [x8]
; CHECK-NEXT: ret
@@ -592,7 +592,7 @@ define <vscale x 4 x float> @ld1rw_float_gep(float* %valp) {
define <vscale x 4 x float> @ld1rw_float_gep_out_of_range_up(float* %valp) {
; CHECK-LABEL: ld1rw_float_gep_out_of_range_up:
; CHECK: // %bb.0:
; CHECK-NEXT: add x8, x0, #256 // =256
; CHECK-NEXT: add x8, x0, #256
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: ld1rw { z0.s }, p0/z, [x8]
; CHECK-NEXT: ret
@@ -606,7 +606,7 @@ define <vscale x 4 x float> @ld1rw_float_gep_out_of_range_up(float* %valp) {
define <vscale x 4 x float> @ld1rw_float_gep_out_of_range_down(float* %valp) {
; CHECK-LABEL: ld1rw_float_gep_out_of_range_down:
; CHECK: // %bb.0:
; CHECK-NEXT: sub x8, x0, #4 // =4
; CHECK-NEXT: sub x8, x0, #4
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: ld1rw { z0.s }, p0/z, [x8]
; CHECK-NEXT: ret
@@ -645,7 +645,7 @@ define <vscale x 2 x float> @ld1rw_float_unpacked2_gep(float* %valp) {
define <vscale x 2 x float> @ld1rw_float_unpacked2_gep_out_of_range_up(float* %valp) {
; CHECK-LABEL: ld1rw_float_unpacked2_gep_out_of_range_up:
; CHECK: // %bb.0:
; CHECK-NEXT: add x8, x0, #256 // =256
; CHECK-NEXT: add x8, x0, #256
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1rw { z0.d }, p0/z, [x8]
; CHECK-NEXT: ret
@@ -659,7 +659,7 @@ define <vscale x 2 x float> @ld1rw_float_unpacked2_gep_out_of_range_up(float* %v
define <vscale x 2 x float> @ld1rw_float_unpacked2_gep_out_of_range_down(float* %valp) {
; CHECK-LABEL: ld1rw_float_unpacked2_gep_out_of_range_down:
; CHECK: // %bb.0:
; CHECK-NEXT: sub x8, x0, #4 // =4
; CHECK-NEXT: sub x8, x0, #4
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1rw { z0.d }, p0/z, [x8]
; CHECK-NEXT: ret
@@ -698,7 +698,7 @@ define <vscale x 2 x double> @ld1rd_double_gep(double* %valp) {
define <vscale x 2 x double> @ld1rd_double_gep_out_of_range_up(double* %valp) {
; CHECK-LABEL: ld1rd_double_gep_out_of_range_up:
; CHECK: // %bb.0:
; CHECK-NEXT: add x8, x0, #512 // =512
; CHECK-NEXT: add x8, x0, #512
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1rd { z0.d }, p0/z, [x8]
; CHECK-NEXT: ret
@@ -712,7 +712,7 @@ define <vscale x 2 x double> @ld1rd_double_gep_out_of_range_up(double* %valp) {
define <vscale x 2 x double> @ld1rd_double_gep_out_of_range_down(double* %valp) {
; CHECK-LABEL: ld1rd_double_gep_out_of_range_down:
; CHECK: // %bb.0:
; CHECK-NEXT: sub x8, x0, #8 // =8
; CHECK-NEXT: sub x8, x0, #8
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld1rd { z0.d }, p0/z, [x8]
; CHECK-NEXT: ret
@@ -47,7 +47,7 @@ define void @ld_st_nxv8i16(i16* %in, i16* %out) {
; ASM-NEXT: add z1.h, z1.h, z0.h
; ASM-NEXT: st1h { z1.h }, p0, [x1, x8, lsl #1]
; ASM-NEXT: add x8, x8, x9
; ASM-NEXT: cmp x8, #1024 // =1024
; ASM-NEXT: cmp x8, #1024
; ASM-NEXT: b.ne .LBB0_1
; ASM-NEXT: // %bb.2: // %exit
; ASM-NEXT: ret
18 changes: 9 additions & 9 deletions llvm/test/CodeGen/AArch64/sve-split-extract-elt.ll
@@ -26,7 +26,7 @@ define i8 @split_extract_32i8_idx(<vscale x 32 x i8> %a, i32 %idx) {
; CHECK-NEXT: rdvl x10, #2
; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
; CHECK-NEXT: sxtw x9, w0
; CHECK-NEXT: sub x10, x10, #1 // =1
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: cmp x9, x10
@@ -51,7 +51,7 @@ define i16 @split_extract_16i16_idx(<vscale x 16 x i16> %a, i32 %idx) {
; CHECK-NEXT: rdvl x10, #1
; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
; CHECK-NEXT: sxtw x9, w0
; CHECK-NEXT: sub x10, x10, #1 // =1
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: cmp x9, x10
@@ -76,7 +76,7 @@ define i32 @split_extract_8i32_idx(<vscale x 8 x i32> %a, i32 %idx) {
; CHECK-NEXT: cnth x10
; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
; CHECK-NEXT: sxtw x9, w0
; CHECK-NEXT: sub x10, x10, #1 // =1
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: cmp x9, x10
@@ -101,7 +101,7 @@ define i64 @split_extract_8i64_idx(<vscale x 8 x i64> %a, i32 %idx) {
; CHECK-NEXT: cnth x10
; CHECK-NEXT: // kill: def $w0 killed $w0 def $x0
; CHECK-NEXT: sxtw x9, w0
; CHECK-NEXT: sub x10, x10, #1 // =1
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: cmp x9, x10
@@ -146,11 +146,11 @@ define i16 @split_extract_16i16(<vscale x 16 x i16> %a) {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: rdvl x10, #1
; CHECK-NEXT: sub x10, x10, #1 // =1
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w9, #128
; CHECK-NEXT: cmp x10, #128 // =128
; CHECK-NEXT: cmp x10, #128
; CHECK-NEXT: st1h { z1.h }, p0, [x8, #1, mul vl]
; CHECK-NEXT: st1h { z0.h }, p0, [sp]
; CHECK-NEXT: csel x9, x10, x9, lo
@@ -172,7 +172,7 @@ define i32 @split_extract_16i32(<vscale x 16 x i32> %a) {
; CHECK-NEXT: mov w9, #34464
; CHECK-NEXT: rdvl x10, #1
; CHECK-NEXT: movk w9, #1, lsl #16
; CHECK-NEXT: sub x10, x10, #1 // =1
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: cmp x10, x9
@@ -197,11 +197,11 @@ define i64 @split_extract_4i64(<vscale x 4 x i64> %a) {
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: cntw x10
; CHECK-NEXT: sub x10, x10, #1 // =1
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: mov w9, #10
; CHECK-NEXT: cmp x10, #10 // =10
; CHECK-NEXT: cmp x10, #10
; CHECK-NEXT: st1d { z1.d }, p0, [x8, #1, mul vl]
; CHECK-NEXT: st1d { z0.d }, p0, [sp]
; CHECK-NEXT: csel x9, x10, x9, lo
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/AArch64/sve-split-insert-elt.ll
@@ -24,7 +24,7 @@ define <vscale x 32 x i8> @split_insert_32i8_idx(<vscale x 32 x i8> %a, i8 %elt,
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: rdvl x8, #2
; CHECK-NEXT: sub x8, x8, #1 // =1
; CHECK-NEXT: sub x8, x8, #1
; CHECK-NEXT: cmp x1, x8
; CHECK-NEXT: ptrue p0.b
; CHECK-NEXT: csel x8, x1, x8, lo
@@ -49,7 +49,7 @@ define <vscale x 8 x float> @split_insert_8f32_idx(<vscale x 8 x float> %a, floa
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 16 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: cnth x8
; CHECK-NEXT: sub x8, x8, #1 // =1
; CHECK-NEXT: sub x8, x8, #1
; CHECK-NEXT: cmp x0, x8
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: csel x8, x0, x8, lo
@@ -74,7 +74,7 @@ define <vscale x 8 x i64> @split_insert_8i64_idx(<vscale x 8 x i64> %a, i64 %elt
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: cnth x8
; CHECK-NEXT: sub x8, x8, #1 // =1
; CHECK-NEXT: sub x8, x8, #1
; CHECK-NEXT: cmp x1, x8
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: csel x8, x1, x8, lo
@@ -136,9 +136,9 @@ define <vscale x 32 x i16> @split_insert_32i16(<vscale x 32 x i16> %a, i16 %elt)
; CHECK-NEXT: .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 32 * VG
; CHECK-NEXT: .cfi_offset w29, -16
; CHECK-NEXT: rdvl x10, #2
; CHECK-NEXT: sub x10, x10, #1 // =1
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: mov w9, #128
; CHECK-NEXT: cmp x10, #128 // =128
; CHECK-NEXT: cmp x10, #128
; CHECK-NEXT: ptrue p0.h
; CHECK-NEXT: mov x8, sp
; CHECK-NEXT: csel x9, x10, x9, lo
@@ -168,7 +168,7 @@ define <vscale x 8 x i32> @split_insert_8i32(<vscale x 8 x i32> %a, i32 %elt) {
; CHECK-NEXT: mov w9, #16960
; CHECK-NEXT: cnth x10
; CHECK-NEXT: movk w9, #15, lsl #16
; CHECK-NEXT: sub x10, x10, #1 // =1
; CHECK-NEXT: sub x10, x10, #1
; CHECK-NEXT: cmp x10, x9
; CHECK-NEXT: ptrue p0.s
; CHECK-NEXT: mov x8, sp
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AArch64/uadd_sat.ll
@@ -45,7 +45,7 @@ define i8 @func8(i8 %x, i8 %y) nounwind {
; CHECK: // %bb.0:
; CHECK-NEXT: and w8, w0, #0xff
; CHECK-NEXT: add w8, w8, w1, uxtb
; CHECK-NEXT: cmp w8, #255 // =255
; CHECK-NEXT: cmp w8, #255
; CHECK-NEXT: mov w9, #255
; CHECK-NEXT: csel w0, w8, w9, lo
; CHECK-NEXT: ret
@@ -59,7 +59,7 @@ define i4 @func3(i4 %x, i4 %y) nounwind {
; CHECK-NEXT: and w8, w1, #0xf
; CHECK-NEXT: and w9, w0, #0xf
; CHECK-NEXT: add w8, w9, w8
; CHECK-NEXT: cmp w8, #15 // =15
; CHECK-NEXT: cmp w8, #15
; CHECK-NEXT: mov w9, #15
; CHECK-NEXT: csel w0, w8, w9, lo
; CHECK-NEXT: ret
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AArch64/uadd_sat_plus.ll
@@ -51,7 +51,7 @@ define i8 @func8(i8 %x, i8 %y, i8 %z) nounwind {
; CHECK-NEXT: and w8, w0, #0xff
; CHECK-NEXT: mul w9, w1, w2
; CHECK-NEXT: add w8, w8, w9, uxtb
; CHECK-NEXT: cmp w8, #255 // =255
; CHECK-NEXT: cmp w8, #255
; CHECK-NEXT: mov w9, #255
; CHECK-NEXT: csel w0, w8, w9, lo
; CHECK-NEXT: ret
@@ -67,7 +67,7 @@ define i4 @func4(i4 %x, i4 %y, i4 %z) nounwind {
; CHECK-NEXT: and w8, w0, #0xf
; CHECK-NEXT: and w9, w9, #0xf
; CHECK-NEXT: add w8, w8, w9
; CHECK-NEXT: cmp w8, #15 // =15
; CHECK-NEXT: cmp w8, #15
; CHECK-NEXT: mov w9, #15
; CHECK-NEXT: csel w0, w8, w9, lo
; CHECK-NEXT: ret
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AArch64/uadd_sat_vec.ll
@@ -355,7 +355,7 @@ define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
; CHECK-NEXT: cmp x9, x3
; CHECK-NEXT: cset w11, lo
; CHECK-NEXT: csel w10, w10, w11, eq
; CHECK-NEXT: cmp w10, #0 // =0
; CHECK-NEXT: cmp w10, #0
; CHECK-NEXT: csinv x3, x9, xzr, eq
; CHECK-NEXT: csinv x2, x8, xzr, eq
; CHECK-NEXT: adds x8, x0, x4
@@ -365,7 +365,7 @@ define <2 x i128> @v2i128(<2 x i128> %x, <2 x i128> %y) nounwind {
; CHECK-NEXT: cmp x9, x1
; CHECK-NEXT: cset w11, lo
; CHECK-NEXT: csel w10, w10, w11, eq
; CHECK-NEXT: cmp w10, #0 // =0
; CHECK-NEXT: cmp w10, #0
; CHECK-NEXT: csinv x8, x8, xzr, eq
; CHECK-NEXT: csinv x1, x9, xzr, eq
; CHECK-NEXT: fmov d0, x8
8 changes: 4 additions & 4 deletions llvm/test/CodeGen/AArch64/uaddo.ll
@@ -8,9 +8,9 @@
define i1 @uaddo_i64_increment_alt(i64 %x, i64* %p) {
; CHECK-LABEL: uaddo_i64_increment_alt:
; CHECK: // %bb.0:
; CHECK-NEXT: adds x8, x0, #1 // =1
; CHECK-NEXT: adds x8, x0, #1
; CHECK-NEXT: cset w0, hs
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: ret
@@ -23,7 +23,7 @@ define i1 @uaddo_i64_increment_alt(i64 %x, i64* %p) {
define i1 @uaddo_i64_increment_alt_dom(i64 %x, i64* %p) {
; CHECK-LABEL: uaddo_i64_increment_alt_dom:
; CHECK: // %bb.0:
; CHECK-NEXT: adds x8, x0, #1 // =1
; CHECK-NEXT: adds x8, x0, #1
; CHECK-NEXT: cset w0, hs
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: ret
@@ -38,7 +38,7 @@ define i1 @uaddo_i64_increment_alt_dom(i64 %x, i64* %p) {
define i1 @uaddo_i64_decrement_alt(i64 %x, i64* %p) {
; CHECK-LABEL: uaddo_i64_decrement_alt:
; CHECK: // %bb.0:
; CHECK-NEXT: subs x8, x0, #1 // =1
; CHECK-NEXT: subs x8, x0, #1
; CHECK-NEXT: cset w0, hs
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: ret
@@ -53,7 +53,7 @@ define i1 @uaddo_i64_decrement_alt(i64 %x, i64* %p) {
define i1 @uaddo_i64_decrement_alt_dom(i64 %x, i64* %p) {
; CHECK-LABEL: uaddo_i64_decrement_alt_dom:
; CHECK: // %bb.0:
; CHECK-NEXT: subs x8, x0, #1 // =1
; CHECK-NEXT: subs x8, x0, #1
; CHECK-NEXT: cset w0, hs
; CHECK-NEXT: str x8, [x1]
; CHECK-NEXT: ret
4 changes: 2 additions & 2 deletions llvm/test/CodeGen/AArch64/umulo-128-legalisation-lowering.ll
@@ -4,10 +4,10 @@
define { i128, i8 } @muloti_test(i128 %l, i128 %r) unnamed_addr #0 {
; AARCH-LABEL: muloti_test:
; AARCH: // %bb.0: // %start
; AARCH-NEXT: cmp x3, #0 // =0
; AARCH-NEXT: cmp x3, #0
; AARCH-NEXT: umulh x8, x1, x2
; AARCH-NEXT: cset w10, ne
; AARCH-NEXT: cmp x1, #0 // =0
; AARCH-NEXT: cmp x1, #0
; AARCH-NEXT: mul x9, x3, x0
; AARCH-NEXT: cset w11, ne
; AARCH-NEXT: cmp xzr, x8
12 changes: 6 additions & 6 deletions llvm/test/CodeGen/AArch64/unwind-preserved.ll
@@ -264,7 +264,7 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
; CHECK: .Lfunc_begin1:
; CHECK-NEXT: .cfi_startproc
; CHECK-NEXT: // %bb.0:
; CHECK-NEXT: sub sp, sp, #304 // =304
; CHECK-NEXT: sub sp, sp, #304
; CHECK-NEXT: stp q23, q22, [sp, #32] // 32-byte Folded Spill
; CHECK-NEXT: stp q21, q20, [sp, #64] // 32-byte Folded Spill
; CHECK-NEXT: stp q19, q18, [sp, #96] // 32-byte Folded Spill
@@ -310,7 +310,7 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
; CHECK-NEXT: ldp q19, q18, [sp, #96] // 32-byte Folded Reload
; CHECK-NEXT: ldp q21, q20, [sp, #64] // 32-byte Folded Reload
; CHECK-NEXT: ldp q23, q22, [sp, #32] // 32-byte Folded Reload
; CHECK-NEXT: add sp, sp, #304 // =304
; CHECK-NEXT: add sp, sp, #304
; CHECK-NEXT: ret
; CHECK-NEXT: .LBB1_2: // %.Lunwind
; CHECK-NEXT: .Ltmp5:
@@ -324,14 +324,14 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
; CHECK-NEXT: ldp q19, q18, [sp, #96] // 32-byte Folded Reload
; CHECK-NEXT: ldp q21, q20, [sp, #64] // 32-byte Folded Reload
; CHECK-NEXT: ldp q23, q22, [sp, #32] // 32-byte Folded Reload
; CHECK-NEXT: add sp, sp, #304 // =304
; CHECK-NEXT: add sp, sp, #304
; CHECK-NEXT: ret
;
; GISEL-LABEL: invoke_callee_may_throw_neon:
; GISEL: .Lfunc_begin1:
; GISEL-NEXT: .cfi_startproc
; GISEL-NEXT: // %bb.0:
; GISEL-NEXT: sub sp, sp, #304 // =304
; GISEL-NEXT: sub sp, sp, #304
; GISEL-NEXT: stp q23, q22, [sp, #32] // 32-byte Folded Spill
; GISEL-NEXT: stp q21, q20, [sp, #64] // 32-byte Folded Spill
; GISEL-NEXT: stp q19, q18, [sp, #96] // 32-byte Folded Spill
@@ -377,7 +377,7 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
; GISEL-NEXT: ldp q19, q18, [sp, #96] // 32-byte Folded Reload
; GISEL-NEXT: ldp q21, q20, [sp, #64] // 32-byte Folded Reload
; GISEL-NEXT: ldp q23, q22, [sp, #32] // 32-byte Folded Reload
; GISEL-NEXT: add sp, sp, #304 // =304
; GISEL-NEXT: add sp, sp, #304
; GISEL-NEXT: ret
; GISEL-NEXT: .LBB1_2: // %.Lunwind
; GISEL-NEXT: .Ltmp5:
@@ -391,7 +391,7 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
; GISEL-NEXT: ldp q19, q18, [sp, #96] // 32-byte Folded Reload
; GISEL-NEXT: ldp q21, q20, [sp, #64] // 32-byte Folded Reload
; GISEL-NEXT: ldp q23, q22, [sp, #32] // 32-byte Folded Reload
; GISEL-NEXT: add sp, sp, #304 // =304
; GISEL-NEXT: add sp, sp, #304
; GISEL-NEXT: ret
%result = invoke aarch64_vector_pcs <4 x i32> @may_throw_neon(<4 x i32> %v) to label %.Lcontinue unwind label %.Lunwind
.Lcontinue: