Skip to content

Commit

Permalink
[MemoryBuiltins][FIX] Adjust index type size properly wrt. AS casts
Browse files Browse the repository at this point in the history
Use the existing constant-offset-stripping functionality, which handles
address-space (AS) casts correctly and avoids duplicating that code.

Since we strip AS casts during the computation of the offset we also
need to adjust the APInt properly to avoid mismatches in the bit width.
This code ensures the caller of `compute` sees APInts whose bit width
matches the index type size of the value originally passed to `compute`,
not that of the value obtained after stripping pointer casts.

Fixes #53559.

Differential Revision: https://reviews.llvm.org/D118727

(cherry picked from commit 29c8eba)
  • Loading branch information
jdoerfert authored and tstellar committed Feb 21, 2022
1 parent c06cc1c commit f3cfaf8
Show file tree
Hide file tree
Showing 3 changed files with 72 additions and 20 deletions.
2 changes: 1 addition & 1 deletion llvm/include/llvm/Analysis/MemoryBuiltins.h
Expand Up @@ -210,7 +210,6 @@ class ObjectSizeOffsetVisitor
SizeOffsetType visitConstantPointerNull(ConstantPointerNull&);
SizeOffsetType visitExtractElementInst(ExtractElementInst &I);
SizeOffsetType visitExtractValueInst(ExtractValueInst &I);
SizeOffsetType visitGEPOperator(GEPOperator &GEP);
SizeOffsetType visitGlobalAlias(GlobalAlias &GA);
SizeOffsetType visitGlobalVariable(GlobalVariable &GV);
SizeOffsetType visitIntToPtrInst(IntToPtrInst&);
Expand All @@ -221,6 +220,7 @@ class ObjectSizeOffsetVisitor
SizeOffsetType visitInstruction(Instruction &I);

private:
SizeOffsetType computeImpl(Value *V);
bool CheckedZextOrTrunc(APInt &I);
};

Expand Down
51 changes: 33 additions & 18 deletions llvm/lib/Analysis/MemoryBuiltins.cpp
Expand Up @@ -573,18 +573,48 @@ ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL,
}

SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
  // Index type size of the pointer as the *caller* sees it. Every APInt we
  // hand back must have exactly this bit width.
  unsigned InitialIntTyBits = DL.getIndexTypeSizeInBits(V->getType());

  // Stripping pointer casts can strip address space casts which can change the
  // index type size. The invariant is that we use the value type to determine
  // the index type size and if we stripped address space casts we have to
  // readjust the APInt as we pass it upwards in order for the APInt to match
  // the type the caller passed in.
  APInt Offset(InitialIntTyBits, 0);
  V = V->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true, /* AllowInvariantGroup */ true);

  // Note: no separate stripPointerCasts() here — stripAndAccumulateConstant-
  // Offsets() above already strips pointer casts (including AS casts), and a
  // late re-strip would run *after* IntTyBits/Zero were derived from V's
  // type, risking stale state if it changed V.

  // Later we use the index type size and zero but it will match the type of the
  // value that is passed to computeImpl.
  IntTyBits = DL.getIndexTypeSizeInBits(V->getType());
  Zero = APInt::getZero(IntTyBits);

  // Fast path: nothing was stripped that matters — same index width, zero
  // accumulated offset — so computeImpl's result can be returned as-is.
  bool IndexTypeSizeChanged = InitialIntTyBits != IntTyBits;
  if (!IndexTypeSizeChanged && Offset.isZero())
    return computeImpl(V);

  // We stripped an address space cast that changed the index type size or we
  // accumulated some constant offset (or both). Readjust the bit width to match
  // the argument index type size and apply the offset, as required.
  SizeOffsetType SOT = computeImpl(V);
  if (IndexTypeSizeChanged) {
    // If the zext/trunc would lose bits, degrade that half of the result to
    // "unknown" (an empty APInt) rather than return a wrong value.
    if (knownSize(SOT) && !::CheckedZextOrTrunc(SOT.first, InitialIntTyBits))
      SOT.first = APInt();
    if (knownOffset(SOT) && !::CheckedZextOrTrunc(SOT.second, InitialIntTyBits))
      SOT.second = APInt();
  }
  // If the computed offset is "unknown" we cannot add the stripped offset.
  return {SOT.first,
          SOT.second.getBitWidth() > 1 ? SOT.second + Offset : SOT.second};
}

SizeOffsetType ObjectSizeOffsetVisitor::computeImpl(Value *V) {
if (Instruction *I = dyn_cast<Instruction>(V)) {
// If we have already seen this instruction, bail out. Cycles can happen in
// unreachable code after constant propagation.
if (!SeenInsts.insert(I).second)
return unknown();

if (GEPOperator *GEP = dyn_cast<GEPOperator>(V))
return visitGEPOperator(*GEP);
return visit(*I);
}
if (Argument *A = dyn_cast<Argument>(V))
Expand All @@ -597,12 +627,6 @@ SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
return visitGlobalVariable(*GV);
if (UndefValue *UV = dyn_cast<UndefValue>(V))
return visitUndefValue(*UV);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
if (CE->getOpcode() == Instruction::IntToPtr)
return unknown(); // clueless
if (CE->getOpcode() == Instruction::GetElementPtr)
return visitGEPOperator(cast<GEPOperator>(*CE));
}

LLVM_DEBUG(dbgs() << "ObjectSizeOffsetVisitor::compute() unhandled value: "
<< *V << '\n');
Expand Down Expand Up @@ -682,15 +706,6 @@ ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst&) {
return unknown();
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGEPOperator(GEPOperator &GEP) {
  // Size/offset of the pointer the GEP is based on; give up if either half
  // is unknown.
  SizeOffsetType BaseData = compute(GEP.getPointerOperand());
  if (!bothKnown(BaseData))
    return unknown();

  // Fold the GEP indices into a single constant byte offset, sized by the
  // base pointer's index type; bail out on a non-constant GEP.
  APInt ByteOffset(
      DL.getIndexTypeSizeInBits(GEP.getPointerOperand()->getType()), 0);
  if (!GEP.accumulateConstantOffset(DL, ByteOffset))
    return unknown();

  // Same object size, offset advanced by the GEP.
  return std::make_pair(BaseData.first, BaseData.second + ByteOffset);
}

SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalAlias(GlobalAlias &GA) {
if (GA.isInterposable())
return unknown();
Expand Down
39 changes: 38 additions & 1 deletion llvm/test/Transforms/InstCombine/builtin-dynamic-object-size.ll
@@ -1,6 +1,6 @@
; RUN: opt -instcombine -S < %s | FileCheck %s

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128-p7:32:32"
target triple = "x86_64-apple-macosx10.14.0"

; Function Attrs: nounwind ssp uwtable
Expand Down Expand Up @@ -152,6 +152,43 @@ if.end: ; preds = %if.else, %if.then
; CHECK-NEXT: br i1 false, label %if.else, label %if.then
; CHECK: call void @fortified_chk(i8* %obj, i64 [[SZ]])

@p7 = internal addrspace(7) global i8 0

; Gracefully handle AS cast when the address spaces have different pointer widths.
define i64 @as_cast(i1 %c) {
; CHECK: [[TMP0:%.*]] = select i1 %c, i64 64, i64 1
; CHECK: [[NOT:%.*]] = xor i1 %c, true
; CHECK: [[NEG:%.*]] = sext i1 [[NOT]] to i64
; CHECK: [[TMP1:%.*]] = add nsw i64 [[TMP0]], [[NEG]]
; CHECK: [[TMP2:%.*]] = icmp ne i64 [[TMP1]], -1
; CHECK: call void @llvm.assume(i1 [[TMP2]])
; CHECK: ret i64 [[TMP1]]
;
entry:
; 64-byte heap object in the default address space (64-bit index type).
%p0 = tail call i8* @malloc(i64 64)
; Offset into the 1-byte global @p7, which lives in addrspace(7) where
; pointers — and hence the index type — are 32 bits (datalayout "p7:32:32").
%gep = getelementptr i8, i8 addrspace(7)* @p7, i32 1
; The addrspacecast widens the index type from 32 to 64 bits; objectsize
; computation must not mix the two bit widths (PR #53559).
%as = addrspacecast i8 addrspace(7)* %gep to i8*
%select = select i1 %c, i8* %p0, i8* %as
%calc_size = tail call i64 @llvm.objectsize.i64.p0i8(i8* %select, i1 false, i1 true, i1 true)
ret i64 %calc_size
}

; Same as @as_cast, but the AS cast and GEP are folded into a single
; constant expression rather than separate instructions.
define i64 @constexpr_as_cast(i1 %c) {
; CHECK: [[TMP0:%.*]] = select i1 %c, i64 64, i64 1
; CHECK: [[NOT:%.*]] = xor i1 %c, true
; CHECK: [[NEG:%.*]] = sext i1 [[NOT]] to i64
; CHECK: [[TMP1:%.*]] = add nsw i64 [[TMP0]], [[NEG]]
; CHECK: [[TMP2:%.*]] = icmp ne i64 [[TMP1]], -1
; CHECK: call void @llvm.assume(i1 [[TMP2]])
; CHECK: ret i64 [[TMP1]]
;
entry:
; 64-byte heap object in the default address space (64-bit index type).
%p0 = tail call i8* @malloc(i64 64)
; Constant-expression addrspacecast of a GEP into the 1-byte addrspace(7)
; global @p7; pointers in AS7 are 32 bits (datalayout "p7:32:32").
%select = select i1 %c, i8* %p0, i8* addrspacecast (i8 addrspace(7)* getelementptr (i8, i8 addrspace(7)* @p7, i32 1) to i8*)
%calc_size = tail call i64 @llvm.objectsize.i64.p0i8(i8* %select, i1 false, i1 true, i1 true)
ret i64 %calc_size
}

declare void @bury(i32) local_unnamed_addr #2

; Function Attrs: nounwind allocsize(0)
Expand Down

0 comments on commit f3cfaf8

Please sign in to comment.