diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 31b4dc9d9cae6..948419f29b48e 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -15519,15 +15519,15 @@ bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL,
   if (!AM.HasBaseReg)
     return false;
 
-  // FIXME: Update this method to support scalable addressing modes.
-  if (Ty->isScalableTargetExtTy())
-    return AM.HasBaseReg && !AM.BaseOffs && !AM.Scale;
+  if (Ty->isScalableTy()) {
+    if (isa<ScalableVectorType>(Ty)) {
+      uint64_t VecElemNumBytes =
+          DL.getTypeSizeInBits(cast<ScalableVectorType>(Ty)->getElementType()) / 8;
+      return AM.HasBaseReg && !AM.BaseOffs &&
+             (AM.Scale == 0 || (uint64_t)AM.Scale == VecElemNumBytes);
+    }
 
-  if (isa<ScalableVectorType>(Ty)) {
-    uint64_t VecElemNumBytes =
-        DL.getTypeSizeInBits(cast<ScalableVectorType>(Ty)->getElementType()) / 8;
-    return AM.HasBaseReg && !AM.BaseOffs &&
-           (AM.Scale == 0 || (uint64_t)AM.Scale == VecElemNumBytes);
+    return AM.HasBaseReg && !AM.BaseOffs && !AM.Scale;
   }
 
   // check reg + imm case:
diff --git a/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll
new file mode 100644
index 0000000000000..0984b6b67045e
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/alloca-load-store-scalable-struct.ll
@@ -0,0 +1,35 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+%struct.test = type { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> }
+
+define void @test(ptr %addr) #0 {
+; CHECK-LABEL: test:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
+; CHECK-NEXT:    addvl sp, sp, #-3
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0c, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 24 * VG
+; CHECK-NEXT:    .cfi_offset w29, -16
+; CHECK-NEXT:    mov x8, #2 // =0x2
+; CHECK-NEXT:    mov x9, #4 // =0x4
+; CHECK-NEXT:    ptrue p0.d
+; CHECK-NEXT:    mov x10, sp
+; CHECK-NEXT:    ld1d { z0.d }, p0/z, [x0, x8, lsl #3]
+; CHECK-NEXT:    ld1d { z1.d }, p0/z, [x0, x9, lsl #3]
+; CHECK-NEXT:    ld1d { z2.d }, p0/z, [x0]
+; CHECK-NEXT:    st1d { z0.d }, p0, [x10, x8, lsl #3]
+; CHECK-NEXT:    st1d { z1.d }, p0, [x10, x9, lsl #3]
+; CHECK-NEXT:    st1d { z2.d }, p0, [sp]
+; CHECK-NEXT:    addvl sp, sp, #3
+; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
+; CHECK-NEXT:    ret
+entry:
+  %ret = alloca %struct.test, align 8
+  %val = load %struct.test, ptr %addr
+  store %struct.test %val, ptr %ret, align 8
+  ret void
+}
+
+attributes #0 = { "target-features"="+sve" }