Skip to content

Commit

Permalink
[CodeGenPrepare] Check that erased sunken address are not reused
Browse files Browse the repository at this point in the history
CodeGenPrepare sinks address computations from one basic block to another
and attempts to reuse address computations that have already been sunk. If
the same address computation appears twice with the first instance as an
operand of a load whose result is an operand to a simplifiable select,
CodeGenPrepare simplifies the select and recursively erases the now dead
instructions. CodeGenPrepare then attempts to use the erased address
computation for the second load.

Fix this by erasing the cached address value if it has zero uses before
looking for the address value in the sunken address map.

This partially resolves PR35209.

Thanks to Alexander Richardson for reporting the issue!

This fixed version relands r318032 which was reverted in r318049 due to
sanitizer buildbot failures.

Reviewers: john.brawn

Differential Revision: https://reviews.llvm.org/D39841

llvm-svn: 318956
  • Loading branch information
Simon Dardis committed Nov 24, 2017
1 parent 0e8924a commit 230f453
Show file tree
Hide file tree
Showing 3 changed files with 80 additions and 5 deletions.
19 changes: 14 additions & 5 deletions llvm/lib/CodeGen/CodeGenPrepare.cpp
Expand Up @@ -245,8 +245,10 @@ class TypePromotionTransaction;

/// Keeps track of non-local addresses that have been sunk into a block.
/// This allows us to avoid inserting duplicate code for blocks with
/// multiple load/stores of the same address.
ValueMap<Value*, Value*> SunkAddrs;
/// multiple load/stores of the same address. The usage of WeakTrackingVH
/// enables SunkAddrs to be treated as a cache whose entries can be
/// invalidated if a sunken address computation has been erased.
ValueMap<Value*, WeakTrackingVH> SunkAddrs;

/// Keeps track of all instructions inserted for the current function.
SetOfInstrs InsertedInsts;
Expand Down Expand Up @@ -4436,9 +4438,13 @@ bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,

// Now that we determined the addressing expression we want to use and know
// that we have to sink it into this block. Check to see if we have already
// done this for some other load/store instr in this block. If so, reuse the
// computation.
Value *&SunkAddr = SunkAddrs[Addr];
// done this for some other load/store instr in this block. If so, reuse
// the computation. Before attempting reuse, check if the address is valid
// as it may have been erased.

WeakTrackingVH SunkAddrVH = SunkAddrs[Addr];

Value * SunkAddr = SunkAddrVH.pointsToAliveValue() ? SunkAddrVH : nullptr;
if (SunkAddr) {
DEBUG(dbgs() << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
<< *MemoryInst << "\n");
Expand Down Expand Up @@ -4663,6 +4669,9 @@ bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
}

MemoryInst->replaceUsesOfWith(Repl, SunkAddr);
// Store the newly computed address into the cache. In the case we reused a
// value, this should be idempotent.
SunkAddrs[Addr] = WeakTrackingVH(SunkAddr);

// If we have no uses, recursively delete the value and all dead instructions
// using it.
Expand Down
2 changes: 2 additions & 0 deletions llvm/test/Transforms/CodeGenPrepare/Mips/lit.local.cfg
@@ -0,0 +1,2 @@
if not 'Mips' in config.root.targets:
config.unsupported = True
64 changes: 64 additions & 0 deletions llvm/test/Transforms/CodeGenPrepare/Mips/pr35209.ll
@@ -0,0 +1,64 @@
; RUN: opt -S -mtriple=mips64-mti-linux-gnu -codegenprepare < %s | FileCheck %s

; Test that if an address that was sunk from a dominating bb is used in a
; select that is erased along with its trivially dead operand, then the
; sunken address is not reused if the same address computation occurs
; after the select. Previously, this caused an ICE.

%struct.az = type { i32, %struct.bt* }
%struct.bt = type { i32 }
%struct.f = type { %struct.ax, %union.anon }
%struct.ax = type { %struct.az* }
%union.anon = type { %struct.bd }
%struct.bd = type { i64 }
%struct.bg = type { i32, i32 }
%struct.ap = type { i32, i32 }

@ch = common global %struct.f zeroinitializer, align 8
@j = common global %struct.az* null, align 8
@ck = common global i32 0, align 4
@h = common global i32 0, align 4
@.str = private unnamed_addr constant [1 x i8] zeroinitializer, align 1

define internal void @probestart() {
entry:
%0 = load %struct.az*, %struct.az** @j, align 8
; Address computation used by two loads in %cl below; CodeGenPrepare sinks
; it out of this dominating block into %cl and caches the sunk value.
%bw = getelementptr inbounds %struct.az, %struct.az* %0, i64 0, i32 1
%1 = load i32, i32* @h, align 4
%cond = icmp eq i32 %1, 0
br i1 %cond, label %sw.bb, label %cl

sw.bb: ; preds = %entry
%call = tail call inreg { i64, i64 } @ba(i32* bitcast (%struct.f* @ch to i32*))
br label %cl

cl: ; preds = %sw.bb, %entry
; First load through %bw. Per the CHECK-NOT lines below, this load and the
; select are expected to be eliminated by CodeGenPrepare; erasing them also
; erases the address computation that was sunk for this load.
%2 = load %struct.bt*, %struct.bt** %bw, align 8
%tobool = icmp eq %struct.bt* %2, null
%3 = load i32, i32* @ck, align 4
; Select over two constant-expression addresses; its condition chain
; (%tobool <- %2) becomes trivially dead once the select is simplified.
%.sink5 = select i1 %tobool, i32* getelementptr (%struct.bg, %struct.bg* bitcast (%union.anon* getelementptr inbounds (%struct.f, %struct.f* @ch, i64 0, i32 1) to %struct.bg*), i64 0, i32 1), i32* getelementptr (%struct.ap, %struct.ap* bitcast (%union.anon* getelementptr inbounds (%struct.f, %struct.f* @ch, i64 0, i32 1) to %struct.ap*), i64 0, i32 1)
store i32 %3, i32* %.sink5, align 4
store i32 1, i32* bitcast (i64* getelementptr inbounds (%struct.f, %struct.f* @ch, i64 0, i32 1, i32 0, i32 0) to i32*), align 8
; Second load through the same address %bw: CodeGenPrepare must re-sink the
; address here rather than reuse the erased cached value (PR35209).
%4 = load %struct.bt*, %struct.bt** %bw, align 8
tail call void (i8*, ...) @a(i8* getelementptr inbounds ([1 x i8], [1 x i8]* @.str, i64 0, i64 0), %struct.bt* %4)
ret void
}

; CHECK-LABEL: @probestart()
; CHECK-LABEL: entry:
; CHECK: %[[I0:[0-9]+]] = load %struct.az*, %struct.az** @j
; CHECK-LABEL: cl:

; CHECK-NOT: %{{[0-9]+}} = load %struct.bt*, %struct.bt** %bw
; CHECK-NOT: %{{[.a-z0-9]}} = select
; CHECK-NOT: %{{[0-9]+}} = load %struct.bt*, %struct.bt** %bw

; CHECK: %[[I1:[0-9]+]] = bitcast %struct.az* %[[I0]] to i8*
; CHECK-NEXT: %sunkaddr = getelementptr i8, i8* %[[I1]], i64 8
; CHECK-NEXT: %[[I2:[0-9]+]] = bitcast i8* %sunkaddr to %struct.bt**
; CHECK-NEXT: %{{[0-9]+}} = load %struct.bt*, %struct.bt** %[[I2]]
; CHECK-NEXT: tail call void (i8*, ...) @a

declare inreg { i64, i64 } @ba(i32*)

declare void @a(i8*, ...)

0 comments on commit 230f453

Please sign in to comment.