
[AArch64] Add vec3 tests with different load/store alignments.
Add extra tests with different load/store alignments for #78637.
fhahn committed Jan 24, 2024
1 parent c3e7707 commit 98509c7
Showing 1 changed file with 248 additions and 8 deletions.
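
For context, the new tests all exercise the same shape of IR: a small vec3 load with an explicit alignment that is then widened (or truncated and stored back). Below is a minimal sketch of one such case, lifted from the tests in this diff; the function name is illustrative, the "align 2" value stands in for the align 2 / align 4 / default-alignment variants being added, and the file's RUN lines (which apparently drive the CHECK prefix for a little-endian Darwin configuration and BE for a big-endian one, judging from the assembly syntax) are not reproduced here. Check lines of this form are normally regenerated with llvm/utils/update_llc_test_checks.py rather than written by hand.

; Sketch only (not part of the commit): a <3 x i8> load with an explicit
; alignment, widened to <4 x i32>, matching the pattern of the added tests.
define <4 x i32> @sketch_load_v3i8_align_2(ptr %src) {
  ; Load three bytes with the alignment under test.
  %l = load <3 x i8>, ptr %src, align 2
  ; Widen the <3 x i8> value into a <4 x i8> vector.
  %s = shufflevector <3 x i8> poison, <3 x i8> %l, <4 x i32> <i32 3, i32 4, i32 5, i32 undef>
  ; Zero-extend each lane to i32.
  %e = zext <4 x i8> %s to <4 x i32>
  ret <4 x i32> %e
}
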
256 changes: 248 additions & 8 deletions llvm/test/CodeGen/AArch64/vec3-loads-ext-trunc-stores.ll
@@ -87,6 +87,76 @@ define <4 x i32> @load_v3i8_to_4xi32(ptr %src) {
ret <4 x i32> %e
}

define <4 x i32> @load_v3i8_to_4xi32_align_2(ptr %src) {
; CHECK-LABEL: load_v3i8_to_4xi32_align_2:
; CHECK: ; %bb.0:
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldrh w8, [x0]
; CHECK-NEXT: movi.2d v1, #0x0000ff000000ff
; CHECK-NEXT: strh w8, [sp, #12]
; CHECK-NEXT: ldr s0, [sp, #12]
; CHECK-NEXT: ldrsb w8, [x0, #2]
; CHECK-NEXT: ushll.8h v0, v0, #0
; CHECK-NEXT: mov.h v0[1], v0[1]
; CHECK-NEXT: mov.h v0[2], w8
; CHECK-NEXT: ushll.4s v0, v0, #0
; CHECK-NEXT: and.16b v0, v0, v1
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; BE-LABEL: load_v3i8_to_4xi32_align_2:
; BE: // %bb.0:
; BE-NEXT: sub sp, sp, #16
; BE-NEXT: .cfi_def_cfa_offset 16
; BE-NEXT: ldrh w8, [x0]
; BE-NEXT: movi v1.2d, #0x0000ff000000ff
; BE-NEXT: strh w8, [sp, #12]
; BE-NEXT: ldr s0, [sp, #12]
; BE-NEXT: ldrsb w8, [x0, #2]
; BE-NEXT: rev32 v0.8b, v0.8b
; BE-NEXT: ushll v0.8h, v0.8b, #0
; BE-NEXT: mov v0.h[1], v0.h[1]
; BE-NEXT: mov v0.h[2], w8
; BE-NEXT: ushll v0.4s, v0.4h, #0
; BE-NEXT: and v0.16b, v0.16b, v1.16b
; BE-NEXT: rev64 v0.4s, v0.4s
; BE-NEXT: ext v0.16b, v0.16b, v0.16b, #8
; BE-NEXT: add sp, sp, #16
; BE-NEXT: ret
%l = load <3 x i8>, ptr %src, align 2
%s = shufflevector <3 x i8> poison, <3 x i8> %l, <4 x i32> <i32 3, i32 4, i32 5, i32 undef>
%e = zext <4 x i8> %s to <4 x i32>
ret <4 x i32> %e
}

define <4 x i32> @load_v3i8_to_4xi32_align_4(ptr %src) {
; CHECK-LABEL: load_v3i8_to_4xi32_align_4:
; CHECK: ; %bb.0:
; CHECK-NEXT: ldr s0, [x0]
; CHECK-NEXT: movi.2d v1, #0x0000ff000000ff
; CHECK-NEXT: zip1.8b v0, v0, v0
; CHECK-NEXT: ushll.4s v0, v0, #0
; CHECK-NEXT: and.16b v0, v0, v1
; CHECK-NEXT: ret
;
; BE-LABEL: load_v3i8_to_4xi32_align_4:
; BE: // %bb.0:
; BE-NEXT: ldr s0, [x0]
; BE-NEXT: movi v1.2d, #0x0000ff000000ff
; BE-NEXT: rev32 v0.8b, v0.8b
; BE-NEXT: zip1 v0.8b, v0.8b, v0.8b
; BE-NEXT: ushll v0.4s, v0.4h, #0
; BE-NEXT: and v0.16b, v0.16b, v1.16b
; BE-NEXT: rev64 v0.4s, v0.4s
; BE-NEXT: ext v0.16b, v0.16b, v0.16b, #8
; BE-NEXT: ret
%l = load <3 x i8>, ptr %src, align 4
%s = shufflevector <3 x i8> poison, <3 x i8> %l, <4 x i32> <i32 3, i32 4, i32 5, i32 undef>
%e = zext <4 x i8> %s to <4 x i32>
ret <4 x i32> %e
}

define <4 x i32> @load_v3i8_to_4xi32_const_offset_1(ptr %src) {
; CHECK-LABEL: load_v3i8_to_4xi32_const_offset_1:
; CHECK: ; %bb.0:
@@ -176,6 +246,42 @@ define <4 x i32> @load_v3i8_to_4xi32_const_offset_3(ptr %src) {
}

define <4 x i32> @volatile_load_v3i8_to_4xi32(ptr %src) {
; CHECK-LABEL: volatile_load_v3i8_to_4xi32:
; CHECK: ; %bb.0:
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldrh w8, [x0]
; CHECK-NEXT: movi.2d v1, #0x0000ff000000ff
; CHECK-NEXT: strh w8, [sp, #12]
; CHECK-NEXT: ldr s0, [sp, #12]
; CHECK-NEXT: ldrsb w8, [x0, #2]
; CHECK-NEXT: ushll.8h v0, v0, #0
; CHECK-NEXT: mov.h v0[1], v0[1]
; CHECK-NEXT: mov.h v0[2], w8
; CHECK-NEXT: ushll.4s v0, v0, #0
; CHECK-NEXT: and.16b v0, v0, v1
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; BE-LABEL: volatile_load_v3i8_to_4xi32:
; BE: // %bb.0:
; BE-NEXT: sub sp, sp, #16
; BE-NEXT: .cfi_def_cfa_offset 16
; BE-NEXT: ldrh w8, [x0]
; BE-NEXT: movi v1.2d, #0x0000ff000000ff
; BE-NEXT: strh w8, [sp, #12]
; BE-NEXT: ldr s0, [sp, #12]
; BE-NEXT: ldrsb w8, [x0, #2]
; BE-NEXT: rev32 v0.8b, v0.8b
; BE-NEXT: ushll v0.8h, v0.8b, #0
; BE-NEXT: mov v0.h[1], v0.h[1]
; BE-NEXT: mov v0.h[2], w8
; BE-NEXT: ushll v0.4s, v0.4h, #0
; BE-NEXT: and v0.16b, v0.16b, v1.16b
; BE-NEXT: rev64 v0.4s, v0.4s
; BE-NEXT: ext v0.16b, v0.16b, v0.16b, #8
; BE-NEXT: add sp, sp, #16
; BE-NEXT: ret
; CHECK-LABEL: volatile_load_v3i8_to_4xi32:
; CHECK: ; %bb.0:
; CHECK-NEXT: sub sp, sp, #16
@@ -286,9 +392,9 @@ define void @store_trunc_add_from_64bits(ptr %src, ptr %dst) {
; CHECK-NEXT: ldr s0, [x0]
; CHECK-NEXT: add x9, x0, #4
; CHECK-NEXT: Lloh0:
; CHECK-NEXT: adrp x8, lCPI7_0@PAGE
; CHECK-NEXT: adrp x8, lCPI9_0@PAGE
; CHECK-NEXT: Lloh1:
; CHECK-NEXT: ldr d1, [x8, lCPI7_0@PAGEOFF]
; CHECK-NEXT: ldr d1, [x8, lCPI9_0@PAGEOFF]
; CHECK-NEXT: ld1.h { v0 }[2], [x9]
; CHECK-NEXT: add.4h v0, v0, v1
; CHECK-NEXT: xtn.8b v1, v0
@@ -309,8 +415,8 @@ define void @store_trunc_add_from_64bits(ptr %src, ptr %dst) {
; BE-NEXT: add x8, x0, #4
; BE-NEXT: rev32 v0.4h, v0.4h
; BE-NEXT: ld1 { v0.h }[2], [x8]
; BE-NEXT: adrp x8, .LCPI7_0
; BE-NEXT: add x8, x8, :lo12:.LCPI7_0
; BE-NEXT: adrp x8, .LCPI9_0
; BE-NEXT: add x8, x8, :lo12:.LCPI9_0
; BE-NEXT: ld1 { v1.4h }, [x8]
; BE-NEXT: add v0.4h, v0.4h, v1.4h
; BE-NEXT: xtn v1.8b, v0.8h
@@ -373,16 +479,74 @@ entry:
ret void
}

define void @load_ext_to_64bits_default_align(ptr %src, ptr %dst) {
; CHECK-LABEL: load_ext_to_64bits_default_align:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: ldr s0, [x0]
; CHECK-NEXT: add x8, x1, #4
; CHECK-NEXT: zip1.8b v0, v0, v0
; CHECK-NEXT: bic.4h v0, #255, lsl #8
; CHECK-NEXT: st1.h { v0 }[2], [x8]
; CHECK-NEXT: str s0, [x1]
; CHECK-NEXT: ret
;
; BE-LABEL: load_ext_to_64bits_default_align:
; BE: // %bb.0: // %entry
; BE-NEXT: ldr s0, [x0]
; BE-NEXT: add x8, x1, #4
; BE-NEXT: rev32 v0.8b, v0.8b
; BE-NEXT: zip1 v0.8b, v0.8b, v0.8b
; BE-NEXT: bic v0.4h, #255, lsl #8
; BE-NEXT: rev32 v1.8h, v0.8h
; BE-NEXT: st1 { v0.h }[2], [x8]
; BE-NEXT: str s1, [x1]
; BE-NEXT: ret
entry:
%l = load <3 x i8>, ptr %src
%e = zext <3 x i8> %l to <3 x i16>
store <3 x i16> %e, ptr %dst, align 1
ret void
}

define void @load_ext_to_64bits_align_4(ptr %src, ptr %dst) {
; CHECK-LABEL: load_ext_to_64bits_align_4:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: ldr s0, [x0]
; CHECK-NEXT: add x8, x1, #4
; CHECK-NEXT: zip1.8b v0, v0, v0
; CHECK-NEXT: bic.4h v0, #255, lsl #8
; CHECK-NEXT: st1.h { v0 }[2], [x8]
; CHECK-NEXT: str s0, [x1]
; CHECK-NEXT: ret
;
; BE-LABEL: load_ext_to_64bits_align_4:
; BE: // %bb.0: // %entry
; BE-NEXT: ldr s0, [x0]
; BE-NEXT: add x8, x1, #4
; BE-NEXT: rev32 v0.8b, v0.8b
; BE-NEXT: zip1 v0.8b, v0.8b, v0.8b
; BE-NEXT: bic v0.4h, #255, lsl #8
; BE-NEXT: rev32 v1.8h, v0.8h
; BE-NEXT: st1 { v0.h }[2], [x8]
; BE-NEXT: str s1, [x1]
; BE-NEXT: ret
entry:
%l = load <3 x i8>, ptr %src, align 4
%e = zext <3 x i8> %l to <3 x i16>
store <3 x i16> %e, ptr %dst, align 1
ret void
}

define void @load_ext_add_to_64bits(ptr %src, ptr %dst) {
; CHECK-LABEL: load_ext_add_to_64bits:
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldrh w9, [x0]
; CHECK-NEXT: Lloh2:
; CHECK-NEXT: adrp x8, lCPI9_0@PAGE
; CHECK-NEXT: adrp x8, lCPI13_0@PAGE
; CHECK-NEXT: Lloh3:
; CHECK-NEXT: ldr d1, [x8, lCPI9_0@PAGEOFF]
; CHECK-NEXT: ldr d1, [x8, lCPI13_0@PAGEOFF]
; CHECK-NEXT: add x8, x1, #4
; CHECK-NEXT: strh w9, [sp, #12]
; CHECK-NEXT: add x9, x0, #2
@@ -408,8 +572,8 @@ define void @load_ext_add_to_64bits(ptr %src, ptr %dst) {
; BE-NEXT: rev32 v0.8b, v0.8b
; BE-NEXT: ushll v0.8h, v0.8b, #0
; BE-NEXT: ld1 { v0.b }[4], [x8]
; BE-NEXT: adrp x8, .LCPI9_0
; BE-NEXT: add x8, x8, :lo12:.LCPI9_0
; BE-NEXT: adrp x8, .LCPI13_0
; BE-NEXT: add x8, x8, :lo12:.LCPI13_0
; BE-NEXT: ld1 { v1.4h }, [x8]
; BE-NEXT: add x8, x1, #4
; BE-NEXT: bic v0.4h, #255, lsl #8
@@ -465,6 +629,82 @@ define void @shift_trunc_store(ptr %src, ptr %dst) {
ret void
}

define void @shift_trunc_store_default_align(ptr %src, ptr %dst) {
; CHECK-LABEL: shift_trunc_store_default_align:
; CHECK: ; %bb.0:
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: shrn.4h v0, v0, #16
; CHECK-NEXT: xtn.8b v1, v0
; CHECK-NEXT: umov.h w8, v0[2]
; CHECK-NEXT: str s1, [sp, #12]
; CHECK-NEXT: ldrh w9, [sp, #12]
; CHECK-NEXT: strb w8, [x1, #2]
; CHECK-NEXT: strh w9, [x1]
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; BE-LABEL: shift_trunc_store_default_align:
; BE: // %bb.0:
; BE-NEXT: sub sp, sp, #16
; BE-NEXT: .cfi_def_cfa_offset 16
; BE-NEXT: ld1 { v0.4s }, [x0]
; BE-NEXT: shrn v0.4h, v0.4s, #16
; BE-NEXT: xtn v1.8b, v0.8h
; BE-NEXT: umov w8, v0.h[2]
; BE-NEXT: rev32 v1.16b, v1.16b
; BE-NEXT: str s1, [sp, #12]
; BE-NEXT: ldrh w9, [sp, #12]
; BE-NEXT: strb w8, [x1, #2]
; BE-NEXT: strh w9, [x1]
; BE-NEXT: add sp, sp, #16
; BE-NEXT: ret
%l = load <3 x i32>, ptr %src
%s = lshr <3 x i32> %l, <i32 16, i32 16, i32 16>
%t = trunc <3 x i32> %s to <3 x i8>
store <3 x i8> %t, ptr %dst
ret void
}

define void @shift_trunc_store_align_4(ptr %src, ptr %dst) {
; CHECK-LABEL: shift_trunc_store_align_4:
; CHECK: ; %bb.0:
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: ldr q0, [x0]
; CHECK-NEXT: shrn.4h v0, v0, #16
; CHECK-NEXT: xtn.8b v1, v0
; CHECK-NEXT: umov.h w8, v0[2]
; CHECK-NEXT: str s1, [sp, #12]
; CHECK-NEXT: ldrh w9, [sp, #12]
; CHECK-NEXT: strb w8, [x1, #2]
; CHECK-NEXT: strh w9, [x1]
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
;
; BE-LABEL: shift_trunc_store_align_4:
; BE: // %bb.0:
; BE-NEXT: sub sp, sp, #16
; BE-NEXT: .cfi_def_cfa_offset 16
; BE-NEXT: ld1 { v0.4s }, [x0]
; BE-NEXT: shrn v0.4h, v0.4s, #16
; BE-NEXT: xtn v1.8b, v0.8h
; BE-NEXT: umov w8, v0.h[2]
; BE-NEXT: rev32 v1.16b, v1.16b
; BE-NEXT: str s1, [sp, #12]
; BE-NEXT: ldrh w9, [sp, #12]
; BE-NEXT: strb w8, [x1, #2]
; BE-NEXT: strh w9, [x1]
; BE-NEXT: add sp, sp, #16
; BE-NEXT: ret
%l = load <3 x i32>, ptr %src
%s = lshr <3 x i32> %l, <i32 16, i32 16, i32 16>
%t = trunc <3 x i32> %s to <3 x i8>
store <3 x i8> %t, ptr %dst, align 4
ret void
}

define void @shift_trunc_store_const_offset_1(ptr %src, ptr %dst) {
; CHECK-LABEL: shift_trunc_store_const_offset_1:
; CHECK: ; %bb.0:
