Skip to content

Commit

Permalink
[AArch64][SVE] Add structured load/store opcodes to getMemOpInfo
Browse files Browse the repository at this point in the history
Currently, loading from or storing to a stack location with a structured load
or store crashes in isAArch64FrameOffsetLegal as the opcodes are not handled by
getMemOpInfo. This patch adds the opcodes for structured load/store instructions
with an immediate index to getMemOpInfo & getLoadStoreImmIdx, setting appropriate
values for the scale, width & min/max offsets.

Reviewed By: sdesmalen, david-arm

Differential Revision: https://reviews.llvm.org/D119338

(cherry picked from commit fc1b212)
  • Loading branch information
kmclaughlin-arm authored and tstellar committed Feb 22, 2022
1 parent cefe687 commit 88f8980
Show file tree
Hide file tree
Showing 4 changed files with 614 additions and 0 deletions.
65 changes: 65 additions & 0 deletions llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2270,6 +2270,19 @@ unsigned AArch64InstrInfo::getLoadStoreImmIdx(unsigned Opc) {
case AArch64::LD1SW_D_IMM:
case AArch64::LD1D_IMM:

case AArch64::LD2B_IMM:
case AArch64::LD2H_IMM:
case AArch64::LD2W_IMM:
case AArch64::LD2D_IMM:
case AArch64::LD3B_IMM:
case AArch64::LD3H_IMM:
case AArch64::LD3W_IMM:
case AArch64::LD3D_IMM:
case AArch64::LD4B_IMM:
case AArch64::LD4H_IMM:
case AArch64::LD4W_IMM:
case AArch64::LD4D_IMM:

case AArch64::ST1B_IMM:
case AArch64::ST1B_H_IMM:
case AArch64::ST1B_S_IMM:
Expand All @@ -2281,6 +2294,19 @@ unsigned AArch64InstrInfo::getLoadStoreImmIdx(unsigned Opc) {
case AArch64::ST1W_D_IMM:
case AArch64::ST1D_IMM:

case AArch64::ST2B_IMM:
case AArch64::ST2H_IMM:
case AArch64::ST2W_IMM:
case AArch64::ST2D_IMM:
case AArch64::ST3B_IMM:
case AArch64::ST3H_IMM:
case AArch64::ST3W_IMM:
case AArch64::ST3D_IMM:
case AArch64::ST4B_IMM:
case AArch64::ST4H_IMM:
case AArch64::ST4W_IMM:
case AArch64::ST4D_IMM:

case AArch64::LD1RB_IMM:
case AArch64::LD1RB_H_IMM:
case AArch64::LD1RB_S_IMM:
Expand Down Expand Up @@ -2897,6 +2923,45 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
MinOffset = -8;
MaxOffset = 7;
break;
case AArch64::LD2B_IMM:
case AArch64::LD2H_IMM:
case AArch64::LD2W_IMM:
case AArch64::LD2D_IMM:
case AArch64::ST2B_IMM:
case AArch64::ST2H_IMM:
case AArch64::ST2W_IMM:
case AArch64::ST2D_IMM:
Scale = TypeSize::Scalable(32);
Width = SVEMaxBytesPerVector * 2;
MinOffset = -8;
MaxOffset = 7;
break;
case AArch64::LD3B_IMM:
case AArch64::LD3H_IMM:
case AArch64::LD3W_IMM:
case AArch64::LD3D_IMM:
case AArch64::ST3B_IMM:
case AArch64::ST3H_IMM:
case AArch64::ST3W_IMM:
case AArch64::ST3D_IMM:
Scale = TypeSize::Scalable(48);
Width = SVEMaxBytesPerVector * 3;
MinOffset = -8;
MaxOffset = 7;
break;
case AArch64::LD4B_IMM:
case AArch64::LD4H_IMM:
case AArch64::LD4W_IMM:
case AArch64::LD4D_IMM:
case AArch64::ST4B_IMM:
case AArch64::ST4H_IMM:
case AArch64::ST4W_IMM:
case AArch64::ST4D_IMM:
Scale = TypeSize::Scalable(64);
Width = SVEMaxBytesPerVector * 4;
MinOffset = -8;
MaxOffset = 7;
break;
case AArch64::LD1B_H_IMM:
case AArch64::LD1SB_H_IMM:
case AArch64::LD1H_S_IMM:
Expand Down
27 changes: 27 additions & 0 deletions llvm/test/CodeGen/AArch64/sve-fixed-ld2-alloca.ll
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

; With a fixed vscale, the <8 x double> load from the stack slot is lowered
; to an SVE structured load addressing the stack (the ld2d below). Compiling
; this at all is the point of the test: structured loads from a stack
; location used to crash in isAArch64FrameOffsetLegal because the structured
; load/store opcodes were not handled by getMemOpInfo (see D119338).
define void @st1d_fixed(<8 x double>* %ptr) #0 {
; CHECK-LABEL: st1d_fixed:
; CHECK: // %bb.0:
; CHECK-NEXT: sub sp, sp, #16
; CHECK-NEXT: add x8, sp, #8
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: ld2d { z0.d, z1.d }, p0/z, [x8]
; CHECK-NEXT: mov x8, #4
; CHECK-NEXT: mov z0.d, #0 // =0x0
; CHECK-NEXT: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: st1d { z0.d }, p0, [x0, x8, lsl #3]
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ret
%alloc = alloca [16 x double], i32 0
%bc = bitcast [16 x double]* %alloc to <8 x double>*
%load = load <8 x double>, <8 x double>* %bc
; The strided shuffle's result is unused; it presumably exists only to make
; the preceding load lower to the structured ld2d checked above.
%strided.vec = shufflevector <8 x double> %load, <8 x double> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
store <8 x double> zeroinitializer, <8 x double>* %ptr
ret void
}

; vscale_range(2,2) pins vscale to exactly 2, i.e. 256-bit SVE registers, so
; <8 x double> spans two Z registers.
attributes #0 = { "target-features"="+sve" vscale_range(2,2) nounwind }
261 changes: 261 additions & 0 deletions llvm/test/CodeGen/AArch64/sve-ldN.mir
Original file line number Diff line number Diff line change
@@ -0,0 +1,261 @@
# RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -run-pass=prologepilog -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s
# RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -start-before=prologepilog -simplify-mir -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-OFFSET

# Exercises frame-index elimination (prologepilog) for the SVE structured
# load opcodes (LD2*/LD3*/LD4* _IMM) against a scalable stack object:
#  - testcase_valid_offset: immediates at the boundaries of the legal
#    [-8, 7] range are kept folded into the instruction once the frame
#    index is resolved to $sp.
#  - testcase_offset_out_of_range: immediates of -9/8 are out of range, so
#    the base address is first materialized into a scratch register ($x8)
#    with ADDVL and the load keeps a boundary immediate (-8 or 7).
# The second RUN line checks the final assembly, where the printed offset
# is scaled by the number of vectors in the group (e.g. ld3 at imm 7 is
# printed as #21, mul vl).

--- |
define void @testcase_valid_offset() nounwind { entry: unreachable }
define void @testcase_offset_out_of_range() nounwind { entry: unreachable }
...
---
# All loads use in-range immediates (-8 and 7), so no scratch register is
# needed after frame-index resolution.
name: testcase_valid_offset
tracksRegLiveness: true
stack:
- { id: 0, name: '', type: default, offset: 0, size: 512, alignment: 16, stack-id: scalable-vector }
body: |
bb.0:
liveins: $p0
; CHECK-LABEL: name: testcase_valid_offset
; CHECK: liveins: $p0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.1)
; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
; CHECK-NEXT: renamable $z0_z1 = LD2B_IMM renamable $p0, $sp, -8
; CHECK-NEXT: renamable $z0_z1 = LD2B_IMM renamable $p0, $sp, 7
; CHECK-NEXT: renamable $z0_z1 = LD2H_IMM renamable $p0, $sp, -8
; CHECK-NEXT: renamable $z0_z1 = LD2H_IMM renamable $p0, $sp, 7
; CHECK-NEXT: renamable $z0_z1 = LD2W_IMM renamable $p0, $sp, -8
; CHECK-NEXT: renamable $z0_z1 = LD2W_IMM renamable $p0, $sp, 7
; CHECK-NEXT: renamable $z0_z1 = LD2D_IMM renamable $p0, $sp, -8
; CHECK-NEXT: renamable $z0_z1 = LD2D_IMM renamable $p0, $sp, 7
; CHECK-NEXT: renamable $z0_z1_z2 = LD3B_IMM renamable $p0, $sp, -8
; CHECK-NEXT: renamable $z0_z1_z2 = LD3B_IMM renamable $p0, $sp, 7
; CHECK-NEXT: renamable $z0_z1_z2 = LD3H_IMM renamable $p0, $sp, -8
; CHECK-NEXT: renamable $z0_z1_z2 = LD3H_IMM renamable $p0, $sp, 7
; CHECK-NEXT: renamable $z0_z1_z2 = LD3W_IMM renamable $p0, $sp, -8
; CHECK-NEXT: renamable $z0_z1_z2 = LD3W_IMM renamable $p0, $sp, 7
; CHECK-NEXT: renamable $z0_z1_z2 = LD3D_IMM renamable $p0, $sp, -8
; CHECK-NEXT: renamable $z0_z1_z2 = LD3D_IMM renamable $p0, $sp, 7
; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, $sp, -8
; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, $sp, 7
; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, $sp, -8
; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, $sp, 7
; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, $sp, -8
; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, $sp, 7
; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, $sp, -8
; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, $sp, 7
; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31
; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1
; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.1)
; CHECK-NEXT: RET_ReallyLR implicit $z0, implicit $z1, implicit $z2, implicit $z3
; CHECK-OFFSET-LABEL: testcase_valid_offset:
; CHECK-OFFSET: str x29, [sp, #-16]!
; CHECK-OFFSET-NEXT: addvl sp, sp, #-32
; CHECK-OFFSET-NEXT: ld2b { z0.b, z1.b }, p0/z, [sp, #-16, mul vl]
; CHECK-OFFSET-NEXT: ld2b { z0.b, z1.b }, p0/z, [sp, #14, mul vl]
; CHECK-OFFSET-NEXT: ld2h { z0.h, z1.h }, p0/z, [sp, #-16, mul vl]
; CHECK-OFFSET-NEXT: ld2h { z0.h, z1.h }, p0/z, [sp, #14, mul vl]
; CHECK-OFFSET-NEXT: ld2w { z0.s, z1.s }, p0/z, [sp, #-16, mul vl]
; CHECK-OFFSET-NEXT: ld2w { z0.s, z1.s }, p0/z, [sp, #14, mul vl]
; CHECK-OFFSET-NEXT: ld2d { z0.d, z1.d }, p0/z, [sp, #-16, mul vl]
; CHECK-OFFSET-NEXT: ld2d { z0.d, z1.d }, p0/z, [sp, #14, mul vl]
; CHECK-OFFSET-NEXT: ld3b { z0.b, z1.b, z2.b }, p0/z, [sp, #-24, mul vl]
; CHECK-OFFSET-NEXT: ld3b { z0.b, z1.b, z2.b }, p0/z, [sp, #21, mul vl]
; CHECK-OFFSET-NEXT: ld3h { z0.h, z1.h, z2.h }, p0/z, [sp, #-24, mul vl]
; CHECK-OFFSET-NEXT: ld3h { z0.h, z1.h, z2.h }, p0/z, [sp, #21, mul vl]
; CHECK-OFFSET-NEXT: ld3w { z0.s, z1.s, z2.s }, p0/z, [sp, #-24, mul vl]
; CHECK-OFFSET-NEXT: ld3w { z0.s, z1.s, z2.s }, p0/z, [sp, #21, mul vl]
; CHECK-OFFSET-NEXT: ld3d { z0.d, z1.d, z2.d }, p0/z, [sp, #-24, mul vl]
; CHECK-OFFSET-NEXT: ld3d { z0.d, z1.d, z2.d }, p0/z, [sp, #21, mul vl]
; CHECK-OFFSET-NEXT: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [sp, #-32, mul vl]
; CHECK-OFFSET-NEXT: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [sp, #28, mul vl]
; CHECK-OFFSET-NEXT: ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [sp, #-32, mul vl]
; CHECK-OFFSET-NEXT: ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [sp, #28, mul vl]
; CHECK-OFFSET-NEXT: ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [sp, #-32, mul vl]
; CHECK-OFFSET-NEXT: ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [sp, #28, mul vl]
; CHECK-OFFSET-NEXT: ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [sp, #-32, mul vl]
; CHECK-OFFSET-NEXT: ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [sp, #28, mul vl]
; CHECK-OFFSET-NEXT: addvl sp, sp, #31
; CHECK-OFFSET-NEXT: addvl sp, sp, #1
; CHECK-OFFSET-NEXT: ldr x29, [sp], #16
; CHECK-OFFSET-NEXT: ret
renamable $z0_z1 = LD2B_IMM renamable $p0, %stack.0, -8
renamable $z0_z1 = LD2B_IMM renamable $p0, %stack.0, 7
renamable $z0_z1 = LD2H_IMM renamable $p0, %stack.0, -8
renamable $z0_z1 = LD2H_IMM renamable $p0, %stack.0, 7
renamable $z0_z1 = LD2W_IMM renamable $p0, %stack.0, -8
renamable $z0_z1 = LD2W_IMM renamable $p0, %stack.0, 7
renamable $z0_z1 = LD2D_IMM renamable $p0, %stack.0, -8
renamable $z0_z1 = LD2D_IMM renamable $p0, %stack.0, 7
renamable $z0_z1_z2 = LD3B_IMM renamable $p0, %stack.0, -8
renamable $z0_z1_z2 = LD3B_IMM renamable $p0, %stack.0, 7
renamable $z0_z1_z2 = LD3H_IMM renamable $p0, %stack.0, -8
renamable $z0_z1_z2 = LD3H_IMM renamable $p0, %stack.0, 7
renamable $z0_z1_z2 = LD3W_IMM renamable $p0, %stack.0, -8
renamable $z0_z1_z2 = LD3W_IMM renamable $p0, %stack.0, 7
renamable $z0_z1_z2 = LD3D_IMM renamable $p0, %stack.0, -8
renamable $z0_z1_z2 = LD3D_IMM renamable $p0, %stack.0, 7
renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, %stack.0, -8
renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, %stack.0, 7
renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, %stack.0, -8
renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, %stack.0, 7
renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, %stack.0, -8
renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, %stack.0, 7
renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, %stack.0, -8
renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, %stack.0, 7
RET_ReallyLR implicit $z0, implicit $z1, implicit $z2, implicit $z3
...
---
# Same loads with immediates one past each end of the range (-9 and 8):
# the offset must be split into an ADDVL into $x8 plus an in-range
# immediate on the load itself.
name: testcase_offset_out_of_range
tracksRegLiveness: true
stack:
- { id: 0, name: '', type: default, offset: 0, size: 512, alignment: 16, stack-id: scalable-vector }
body: |
bb.0:
liveins: $p0
; CHECK-LABEL: name: testcase_offset_out_of_range
; CHECK: liveins: $p0
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: early-clobber $sp = frame-setup STRXpre killed $fp, $sp, -16 :: (store (s64) into %stack.1)
; CHECK-NEXT: $sp = frame-setup ADDVL_XXI $sp, -32
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -2
; CHECK-NEXT: renamable $z0_z1 = LD2B_IMM renamable $p0, killed $x8, -8
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 2
; CHECK-NEXT: renamable $z0_z1 = LD2B_IMM renamable $p0, killed $x8, 7
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -2
; CHECK-NEXT: renamable $z0_z1 = LD2H_IMM renamable $p0, killed $x8, -8
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 2
; CHECK-NEXT: renamable $z0_z1 = LD2H_IMM renamable $p0, killed $x8, 7
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -2
; CHECK-NEXT: renamable $z0_z1 = LD2W_IMM renamable $p0, killed $x8, -8
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 2
; CHECK-NEXT: renamable $z0_z1 = LD2W_IMM renamable $p0, killed $x8, 7
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -2
; CHECK-NEXT: renamable $z0_z1 = LD2D_IMM renamable $p0, killed $x8, -8
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 2
; CHECK-NEXT: renamable $z0_z1 = LD2D_IMM renamable $p0, killed $x8, 7
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -3
; CHECK-NEXT: renamable $z0_z1_z2 = LD3B_IMM renamable $p0, killed $x8, -8
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 3
; CHECK-NEXT: renamable $z0_z1_z2 = LD3B_IMM renamable $p0, killed $x8, 7
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -3
; CHECK-NEXT: renamable $z0_z1_z2 = LD3H_IMM renamable $p0, killed $x8, -8
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 3
; CHECK-NEXT: renamable $z0_z1_z2 = LD3H_IMM renamable $p0, killed $x8, 7
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -3
; CHECK-NEXT: renamable $z0_z1_z2 = LD3W_IMM renamable $p0, killed $x8, -8
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 3
; CHECK-NEXT: renamable $z0_z1_z2 = LD3W_IMM renamable $p0, killed $x8, 7
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -3
; CHECK-NEXT: renamable $z0_z1_z2 = LD3D_IMM renamable $p0, killed $x8, -8
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 3
; CHECK-NEXT: renamable $z0_z1_z2 = LD3D_IMM renamable $p0, killed $x8, 7
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -4
; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, killed $x8, -8
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 4
; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, killed $x8, 7
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -4
; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, killed $x8, -8
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 4
; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, killed $x8, 7
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -4
; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, killed $x8, -8
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 4
; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, killed $x8, 7
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, -4
; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, killed $x8, -8
; CHECK-NEXT: $x8 = ADDVL_XXI $sp, 4
; CHECK-NEXT: renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, killed $x8, 7
; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 31
; CHECK-NEXT: $sp = frame-destroy ADDVL_XXI $sp, 1
; CHECK-NEXT: early-clobber $sp, $fp = frame-destroy LDRXpost $sp, 16 :: (load (s64) from %stack.1)
; CHECK-NEXT: RET_ReallyLR implicit $z0, implicit $z1, implicit $z2, implicit $z3
; CHECK-OFFSET-LABEL: testcase_offset_out_of_range:
; CHECK-OFFSET: str x29, [sp, #-16]!
; CHECK-OFFSET-NEXT: addvl sp, sp, #-32
; CHECK-OFFSET-NEXT: addvl x8, sp, #-2
; CHECK-OFFSET-NEXT: ld2b { z0.b, z1.b }, p0/z, [x8, #-16, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #2
; CHECK-OFFSET-NEXT: ld2b { z0.b, z1.b }, p0/z, [x8, #14, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #-2
; CHECK-OFFSET-NEXT: ld2h { z0.h, z1.h }, p0/z, [x8, #-16, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #2
; CHECK-OFFSET-NEXT: ld2h { z0.h, z1.h }, p0/z, [x8, #14, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #-2
; CHECK-OFFSET-NEXT: ld2w { z0.s, z1.s }, p0/z, [x8, #-16, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #2
; CHECK-OFFSET-NEXT: ld2w { z0.s, z1.s }, p0/z, [x8, #14, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #-2
; CHECK-OFFSET-NEXT: ld2d { z0.d, z1.d }, p0/z, [x8, #-16, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #2
; CHECK-OFFSET-NEXT: ld2d { z0.d, z1.d }, p0/z, [x8, #14, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #-3
; CHECK-OFFSET-NEXT: ld3b { z0.b, z1.b, z2.b }, p0/z, [x8, #-24, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #3
; CHECK-OFFSET-NEXT: ld3b { z0.b, z1.b, z2.b }, p0/z, [x8, #21, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #-3
; CHECK-OFFSET-NEXT: ld3h { z0.h, z1.h, z2.h }, p0/z, [x8, #-24, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #3
; CHECK-OFFSET-NEXT: ld3h { z0.h, z1.h, z2.h }, p0/z, [x8, #21, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #-3
; CHECK-OFFSET-NEXT: ld3w { z0.s, z1.s, z2.s }, p0/z, [x8, #-24, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #3
; CHECK-OFFSET-NEXT: ld3w { z0.s, z1.s, z2.s }, p0/z, [x8, #21, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #-3
; CHECK-OFFSET-NEXT: ld3d { z0.d, z1.d, z2.d }, p0/z, [x8, #-24, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #3
; CHECK-OFFSET-NEXT: ld3d { z0.d, z1.d, z2.d }, p0/z, [x8, #21, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #-4
; CHECK-OFFSET-NEXT: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x8, #-32, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #4
; CHECK-OFFSET-NEXT: ld4b { z0.b, z1.b, z2.b, z3.b }, p0/z, [x8, #28, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #-4
; CHECK-OFFSET-NEXT: ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x8, #-32, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #4
; CHECK-OFFSET-NEXT: ld4h { z0.h, z1.h, z2.h, z3.h }, p0/z, [x8, #28, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #-4
; CHECK-OFFSET-NEXT: ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x8, #-32, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #4
; CHECK-OFFSET-NEXT: ld4w { z0.s, z1.s, z2.s, z3.s }, p0/z, [x8, #28, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #-4
; CHECK-OFFSET-NEXT: ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x8, #-32, mul vl]
; CHECK-OFFSET-NEXT: addvl x8, sp, #4
; CHECK-OFFSET-NEXT: ld4d { z0.d, z1.d, z2.d, z3.d }, p0/z, [x8, #28, mul vl]
; CHECK-OFFSET-NEXT: addvl sp, sp, #31
; CHECK-OFFSET-NEXT: addvl sp, sp, #1
; CHECK-OFFSET-NEXT: ldr x29, [sp], #16
; CHECK-OFFSET-NEXT: ret
renamable $z0_z1 = LD2B_IMM renamable $p0, %stack.0, -9
renamable $z0_z1 = LD2B_IMM renamable $p0, %stack.0, 8
renamable $z0_z1 = LD2H_IMM renamable $p0, %stack.0, -9
renamable $z0_z1 = LD2H_IMM renamable $p0, %stack.0, 8
renamable $z0_z1 = LD2W_IMM renamable $p0, %stack.0, -9
renamable $z0_z1 = LD2W_IMM renamable $p0, %stack.0, 8
renamable $z0_z1 = LD2D_IMM renamable $p0, %stack.0, -9
renamable $z0_z1 = LD2D_IMM renamable $p0, %stack.0, 8
renamable $z0_z1_z2 = LD3B_IMM renamable $p0, %stack.0, -9
renamable $z0_z1_z2 = LD3B_IMM renamable $p0, %stack.0, 8
renamable $z0_z1_z2 = LD3H_IMM renamable $p0, %stack.0, -9
renamable $z0_z1_z2 = LD3H_IMM renamable $p0, %stack.0, 8
renamable $z0_z1_z2 = LD3W_IMM renamable $p0, %stack.0, -9
renamable $z0_z1_z2 = LD3W_IMM renamable $p0, %stack.0, 8
renamable $z0_z1_z2 = LD3D_IMM renamable $p0, %stack.0, -9
renamable $z0_z1_z2 = LD3D_IMM renamable $p0, %stack.0, 8
renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, %stack.0, -9
renamable $z0_z1_z2_z3 = LD4B_IMM renamable $p0, %stack.0, 8
renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, %stack.0, -9
renamable $z0_z1_z2_z3 = LD4H_IMM renamable $p0, %stack.0, 8
renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, %stack.0, -9
renamable $z0_z1_z2_z3 = LD4W_IMM renamable $p0, %stack.0, 8
renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, %stack.0, -9
renamable $z0_z1_z2_z3 = LD4D_IMM renamable $p0, %stack.0, 8
RET_ReallyLR implicit $z0, implicit $z1, implicit $z2, implicit $z3
...

0 comments on commit 88f8980

Please sign in to comment.