diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index b96d7c34161cb..815c94b07a330 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -18922,6 +18922,11 @@ ShrinkLoadReplaceStoreWithStore(const std::pair<unsigned, unsigned> &MaskInfo,
     UseTruncStore = true;
   else
     return SDValue();
+
+  // Can't do this for indexed stores.
+  if (St->isIndexed())
+    return SDValue();
+
   // Check that the target doesn't think this is a bad idea.
   if (St->getMemOperand() &&
       !TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
diff --git a/llvm/test/CodeGen/AArch64/replace-load-with-shrink-store-indexed-crash.ll b/llvm/test/CodeGen/AArch64/replace-load-with-shrink-store-indexed-crash.ll
new file mode 100644
index 0000000000000..61707cd929cd3
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/replace-load-with-shrink-store-indexed-crash.ll
@@ -0,0 +1,31 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+target triple = "arm64-apple-macosx13.0.0"
+
+define void @_Z1hP1f(ptr %j) {
+; CHECK-LABEL: _Z1hP1f:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:  .LBB0_1: // %for.body
+; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    mov x8, x0
+; CHECK-NEXT:    ldr w9, [x8]
+; CHECK-NEXT:    mov w10, w9
+; CHECK-NEXT:    bfi w10, w9, #16, #16
+; CHECK-NEXT:    str w10, [x8], #4
+; CHECK-NEXT:    str w9, [x0]
+; CHECK-NEXT:    b .LBB0_1
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %for.body, %entry
+  %indvars.iv1 = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %arrayidx3 = getelementptr i32, ptr %j, i64 %indvars.iv1
+  %0 = load i32, ptr %arrayidx3, align 4
+  %and = and i32 %0, 65535
+  %or = mul i32 %and, 65537
+  store i32 %or, ptr %arrayidx3, align 4
+  store i32 %0, ptr %j, align 4
+  %indvars.iv.next = add i64 %indvars.iv1, 1
+  br label %for.body
+}
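
Note (added for context, not part of the patch): ShrinkLoadReplaceStoreWithStore rewrites a store whose stored value is a just-loaded value combined under a constant mask into a narrower store of only the changed bytes. A pre/post-indexed StoreSDNode, however, also produces the updated base pointer as an extra result besides the chain, and a plain unindexed replacement store cannot supply that result; presumably this is why the combine has to bail out early on indexed stores, as the new guard does. Below is a minimal standalone sketch of the same check; the helper name canShrinkStoreInPlace is made up for illustration and does not exist in LLVM.

  // Sketch only: the guard the patch adds, expressed as a free-standing helper.
  #include "llvm/CodeGen/ISDOpcodes.h"
  #include "llvm/CodeGen/SelectionDAGNodes.h"

  static bool canShrinkStoreInPlace(const llvm::StoreSDNode *St) {
    // Indexed (pre/post-increment) stores also yield the updated base pointer,
    // which a plain truncating replacement store cannot provide, so reject them.
    return St->getAddressingMode() == llvm::ISD::UNINDEXED; // same as !St->isIndexed()
  }

The new AArch64 test exercises exactly this shape: the str w10, [x8], #4 in the CHECK lines is the post-indexed store that previously sent the combine down the crashing path.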