53 changes: 25 additions & 28 deletions llvm/include/llvm/Analysis/ValueTracking.h
@@ -33,12 +33,12 @@ namespace llvm {
/// them in the KnownZero/KnownOne bit sets.
///
/// This function is defined on values with integer type, values with pointer
/// type (but only if TD is non-null), and vectors of integers. In the case
/// type, and vectors of integers. In the case
/// where V is a vector, the known zero and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
void computeKnownBits(Value *V, APInt &KnownZero, APInt &KnownOne,
const DataLayout *TD = nullptr, unsigned Depth = 0,
const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
@@ -50,7 +50,7 @@ namespace llvm {
/// ComputeSignBit - Determine whether the sign bit is known to be zero or
/// one. Convenience wrapper around computeKnownBits.
void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
const DataLayout *TD = nullptr, unsigned Depth = 0,
const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
@@ -60,7 +60,8 @@ namespace llvm {
/// element is known to be a power of two when defined. Supports values with
/// integer or pointer type and vectors of integers. If 'OrZero' is set then
/// returns true if the given value is either a power of two or zero.
bool isKnownToBeAPowerOfTwo(Value *V, bool OrZero = false, unsigned Depth = 0,
bool isKnownToBeAPowerOfTwo(Value *V, const DataLayout &DL,
bool OrZero = false, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
@@ -69,8 +70,8 @@ namespace llvm {
/// when defined. For vectors return true if every element is known to be
/// non-zero when defined. Supports values with integer or pointer type and
/// vectors of integers.
bool isKnownNonZero(Value *V, const DataLayout *TD = nullptr,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
bool isKnownNonZero(Value *V, const DataLayout &DL, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);

@@ -79,13 +80,12 @@ namespace llvm {
/// zero for bits that V cannot have.
///
/// This function is defined on values with integer type, values with pointer
/// type (but only if TD is non-null), and vectors of integers. In the case
/// type, and vectors of integers. In the case
/// where V is a vector, the mask, known zero, and known one values are the
/// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector.
bool MaskedValueIsZero(Value *V, const APInt &Mask,
const DataLayout *TD = nullptr, unsigned Depth = 0,
AssumptionCache *AC = nullptr,
bool MaskedValueIsZero(Value *V, const APInt &Mask, const DataLayout &DL,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);

@@ -97,7 +97,7 @@ namespace llvm {
///
/// 'Op' must have a scalar integer type.
///
unsigned ComputeNumSignBits(Value *Op, const DataLayout *TD = nullptr,
unsigned ComputeNumSignBits(Value *Op, const DataLayout &DL,
unsigned Depth = 0, AssumptionCache *AC = nullptr,
const Instruction *CxtI = nullptr,
const DominatorTree *DT = nullptr);
@@ -142,11 +142,12 @@ namespace llvm {
/// it can be expressed as a base pointer plus a constant offset. Return the
/// base and offset to the caller.
Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
const DataLayout *TD);
const DataLayout &DL);
static inline const Value *
GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
const DataLayout *TD) {
return GetPointerBaseWithConstantOffset(const_cast<Value*>(Ptr), Offset,TD);
const DataLayout &DL) {
return GetPointerBaseWithConstantOffset(const_cast<Value *>(Ptr), Offset,
DL);
}

/// getConstantStringInfo - This function computes the length of a
@@ -167,21 +168,19 @@ namespace llvm {
/// being addressed. Note that the returned value has pointer type if the
/// specified value does. If the MaxLookup value is non-zero, it limits the
/// number of instructions to be stripped off.
Value *GetUnderlyingObject(Value *V, const DataLayout *TD = nullptr,
Value *GetUnderlyingObject(Value *V, const DataLayout &DL,
unsigned MaxLookup = 6);
static inline const Value *
GetUnderlyingObject(const Value *V, const DataLayout *TD = nullptr,
unsigned MaxLookup = 6) {
return GetUnderlyingObject(const_cast<Value *>(V), TD, MaxLookup);
static inline const Value *GetUnderlyingObject(const Value *V,
const DataLayout &DL,
unsigned MaxLookup = 6) {
return GetUnderlyingObject(const_cast<Value *>(V), DL, MaxLookup);
}

/// GetUnderlyingObjects - This method is similar to GetUnderlyingObject
/// except that it can look through phi and select instructions and return
/// multiple objects.
void GetUnderlyingObjects(Value *V,
SmallVectorImpl<Value *> &Objects,
const DataLayout *TD = nullptr,
unsigned MaxLookup = 6);
void GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects,
const DataLayout &DL, unsigned MaxLookup = 6);

/// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer
/// are lifetime markers.
@@ -205,8 +204,7 @@ namespace llvm {
/// the correct dominance relationships for the operands and users hold.
/// However, this method can return true for instructions that read memory;
/// for such instructions, moving them may change the resulting value.
bool isSafeToSpeculativelyExecute(const Value *V,
const DataLayout *TD = nullptr);
bool isSafeToSpeculativelyExecute(const Value *V);

/// isKnownNonNull - Return true if this pointer couldn't possibly be null by
/// its definition. This returns true for allocas, non-extern-weak globals
@@ -217,17 +215,16 @@ namespace llvm {
/// assume intrinsic, I, at the point in the control-flow identified by the
/// context instruction, CxtI.
bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI,
const DataLayout *DL = nullptr,
const DominatorTree *DT = nullptr);

enum class OverflowResult { AlwaysOverflows, MayOverflow, NeverOverflows };
OverflowResult computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
const DataLayout *DL,
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT);
OverflowResult computeOverflowForUnsignedAdd(Value *LHS, Value *RHS,
const DataLayout *DL,
const DataLayout &DL,
AssumptionCache *AC,
const Instruction *CxtI,
const DominatorTree *DT);
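For illustration, a minimal caller sketch of the new ValueTracking signatures; the helper name exampleKnownBits and its arguments F and V are hypothetical and not part of this change, only the computeKnownBits/isKnownNonZero parameter lists are.

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Module.h"

// Hypothetical helper: V is assumed to have integer type.
static bool exampleKnownBits(llvm::Function &F, llvm::Value *V) {
  using namespace llvm;
  const DataLayout &DL = F.getParent()->getDataLayout();
  unsigned BitWidth = V->getType()->getScalarSizeInBits();
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  computeKnownBits(V, KnownZero, KnownOne, DL); // DataLayout is now a mandatory reference
  return isKnownNonZero(V, DL);                 // no "TD = nullptr" default to fall back on
}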
6 changes: 3 additions & 3 deletions llvm/include/llvm/IR/IRBuilder.h
@@ -351,8 +351,8 @@ class IRBuilderBase {
}

/// \brief Fetch the type representing a pointer to an integer value.
IntegerType* getIntPtrTy(const DataLayout *DL, unsigned AddrSpace = 0) {
return DL->getIntPtrType(Context, AddrSpace);
IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
return DL.getIntPtrType(Context, AddrSpace);
}

//===--------------------------------------------------------------------===//
@@ -1595,7 +1595,7 @@ class IRBuilder : public IRBuilderBase, public Inserter {
"trying to create an alignment assumption on a non-pointer?");

PointerType *PtrTy = cast<PointerType>(PtrValue->getType());
Type *IntPtrTy = getIntPtrTy(&DL, PtrTy->getAddressSpace());
Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
Value *PtrIntValue = CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");

Value *Mask = ConstantInt::get(IntPtrTy,
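A small sketch of the updated getIntPtrTy use; the names examplePtrToInt, M, Builder, and Ptr are placeholders introduced here, not by the patch.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"

// Hypothetical snippet: M is the Module being edited, Ptr a pointer-typed Value*.
static llvm::Value *examplePtrToInt(llvm::Module &M, llvm::IRBuilder<> &Builder,
                                    llvm::Value *Ptr) {
  const llvm::DataLayout &DL = M.getDataLayout();
  llvm::Type *IntPtrTy = Builder.getIntPtrTy(DL); // was getIntPtrTy(&DL)
  return Builder.CreatePtrToInt(Ptr, IntPtrTy, "ptrint");
}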
13 changes: 6 additions & 7 deletions llvm/include/llvm/IR/InstrTypes.h
@@ -570,10 +570,9 @@ class CastInst : public UnaryInstruction {
/// This ensures that any pointer<->integer cast has enough bits in the
/// integer and any other cast is a bitcast.
static bool isBitOrNoopPointerCastable(
Type *SrcTy, ///< The Type from which the value should be cast.
Type *DestTy, ///< The Type to which the value should be cast.
const DataLayout *Layout = 0 ///< Optional DataLayout.
);
Type *SrcTy, ///< The Type from which the value should be cast.
Type *DestTy, ///< The Type to which the value should be cast.
const DataLayout &DL);

/// Returns the opcode necessary to cast Val into Ty using usual casting
/// rules.
@@ -621,9 +620,9 @@ class CastInst : public UnaryInstruction {
) const;

/// @brief Determine if this cast is a no-op cast.
bool isNoopCast(
const DataLayout *DL ///< DataLayout to get the Int Ptr type from.
) const;
///
/// \param DL is the DataLayout to get the Int Ptr type from.
bool isNoopCast(const DataLayout &DL) const;

/// Determine how a pair of casts can be eliminated, if they can be at all.
/// This is a helper function for both CastInst and ConstantExpr.
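A hedged sketch of the two CastInst entry points with their now-mandatory DataLayout; CI, SrcTy, and DestTy are assumed to come from the surrounding pass and are not defined by this patch.

#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Module.h"

// Hypothetical check: CI is an existing CastInst in some function.
static bool exampleCastQueries(llvm::CastInst *CI, llvm::Type *SrcTy,
                               llvm::Type *DestTy) {
  const llvm::DataLayout &DL = CI->getModule()->getDataLayout();
  bool Noop = CI->isNoopCast(DL);                                           // reference, not pointer
  bool Ok = llvm::CastInst::isBitOrNoopPointerCastable(SrcTy, DestTy, DL);  // DL no longer optional
  return Noop || Ok;
}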
2 changes: 1 addition & 1 deletion llvm/include/llvm/IR/Value.h
@@ -450,7 +450,7 @@ class Value {
///
/// Test if this value is always a pointer to allocated and suitably aligned
/// memory for a simple load or store.
bool isDereferenceablePointer(const DataLayout *DL = nullptr) const;
bool isDereferenceablePointer(const DataLayout &DL) const;

/// \brief Translate PHI node to its predecessor from the given basic block.
///
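For the Value interface, a one-line usage sketch; the wrapper function and the names M and V are hypothetical.

#include "llvm/IR/Module.h"
#include "llvm/IR/Value.h"

// Hypothetical guard before speculating a load from V inside Module M.
static bool exampleDereferenceable(const llvm::Module &M, const llvm::Value *V) {
  return V->isDereferenceablePointer(M.getDataLayout()); // DataLayout reference is now required
}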
3 changes: 1 addition & 2 deletions llvm/include/llvm/Target/TargetLowering.h
@@ -160,7 +160,7 @@ class TargetLoweringBase {

public:
const TargetMachine &getTargetMachine() const { return TM; }
const DataLayout *getDataLayout() const { return DL; }
const DataLayout *getDataLayout() const { return TM.getDataLayout(); }

bool isBigEndian() const { return !IsLittleEndian; }
bool isLittleEndian() const { return IsLittleEndian; }
@@ -1639,7 +1639,6 @@ class TargetLoweringBase {

private:
const TargetMachine &TM;
const DataLayout *DL;

/// True if this is a little endian target.
bool IsLittleEndian;
2 changes: 1 addition & 1 deletion llvm/include/llvm/Transforms/IPO/LowerBitSets.h
@@ -54,7 +54,7 @@ struct BitSetInfo {

bool containsGlobalOffset(uint64_t Offset) const;

bool containsValue(const DataLayout *DL,
bool containsValue(const DataLayout &DL,
const DenseMap<GlobalVariable *, uint64_t> &GlobalLayout,
Value *V, uint64_t COffset = 0) const;
};
32 changes: 14 additions & 18 deletions llvm/include/llvm/Transforms/Utils/BuildLibCalls.h
@@ -28,52 +28,50 @@ namespace llvm {
/// EmitStrLen - Emit a call to the strlen function to the builder, for the
/// specified pointer. Ptr is required to be some pointer type, and the
/// return value has 'intptr_t' type.
Value *EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout *TD,
Value *EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout &DL,
const TargetLibraryInfo *TLI);

/// EmitStrNLen - Emit a call to the strnlen function to the builder, for the
/// specified pointer. Ptr is required to be some pointer type, MaxLen must
/// be of size_t type, and the return value has 'intptr_t' type.
Value *EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI);
const DataLayout &DL, const TargetLibraryInfo *TLI);

/// EmitStrChr - Emit a call to the strchr function to the builder, for the
/// specified pointer and character. Ptr is required to be some pointer type,
/// and the return value has 'i8*' type.
Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B, const DataLayout *TD,
Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B,
const TargetLibraryInfo *TLI);

/// EmitStrNCmp - Emit a call to the strncmp function to the builder.
Value *EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI);
const DataLayout &DL, const TargetLibraryInfo *TLI);

/// EmitStrCpy - Emit a call to the strcpy function to the builder, for the
/// specified pointer arguments.
Value *EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI,
StringRef Name = "strcpy");
const TargetLibraryInfo *TLI, StringRef Name = "strcpy");

/// EmitStrNCpy - Emit a call to the strncpy function to the builder, for the
/// specified pointer arguments and length.
Value *EmitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI,
StringRef Name = "strncpy");
const TargetLibraryInfo *TLI, StringRef Name = "strncpy");

/// EmitMemCpyChk - Emit a call to the __memcpy_chk function to the builder.
/// This expects that the Len and ObjSize have type 'intptr_t' and Dst/Src
/// are pointers.
Value *EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
IRBuilder<> &B, const DataLayout *TD,
IRBuilder<> &B, const DataLayout &DL,
const TargetLibraryInfo *TLI);

/// EmitMemChr - Emit a call to the memchr function. This assumes that Ptr is
/// a pointer, Val is an i32 value, and Len is an 'intptr_t' value.
Value *EmitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI);
const DataLayout &DL, const TargetLibraryInfo *TLI);

/// EmitMemCmp - Emit a call to the memcmp function.
Value *EmitMemCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI);
const DataLayout &DL, const TargetLibraryInfo *TLI);

/// EmitUnaryFloatFnCall - Emit a call to the unary function named 'Name'
/// (e.g. 'floor'). This function is known to take a single of type matching
@@ -93,28 +91,26 @@ namespace llvm {

/// EmitPutChar - Emit a call to the putchar function. This assumes that Char
/// is an integer.
Value *EmitPutChar(Value *Char, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI);
Value *EmitPutChar(Value *Char, IRBuilder<> &B, const TargetLibraryInfo *TLI);

/// EmitPutS - Emit a call to the puts function. This assumes that Str is
/// some pointer.
Value *EmitPutS(Value *Str, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI);
Value *EmitPutS(Value *Str, IRBuilder<> &B, const TargetLibraryInfo *TLI);

/// EmitFPutC - Emit a call to the fputc function. This assumes that Char is
/// an i32, and File is a pointer to FILE.
Value *EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI);
const TargetLibraryInfo *TLI);

/// EmitFPutS - Emit a call to the fputs function. Str is required to be a
/// pointer and File is a pointer to FILE.
Value *EmitFPutS(Value *Str, Value *File, IRBuilder<> &B, const DataLayout *TD,
Value *EmitFPutS(Value *Str, Value *File, IRBuilder<> &B,
const TargetLibraryInfo *TLI);

/// EmitFWrite - Emit a call to the fwrite function. This assumes that Ptr is
/// a pointer, Size is an 'intptr_t', and File is a pointer to FILE.
Value *EmitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B,
const DataLayout *TD, const TargetLibraryInfo *TLI);
const DataLayout &DL, const TargetLibraryInfo *TLI);
}

#endif
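A sketch of how the emitters are now driven; the wrapper exampleEmitters and the names B, TLI, Ptr, and Str are placeholders, and only the parameter lists come from this patch.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"

// Hypothetical lowering snippet.
static void exampleEmitters(llvm::Module &M, llvm::IRBuilder<> &B, llvm::Value *Ptr,
                            llvm::Value *Str, const llvm::TargetLibraryInfo *TLI) {
  const llvm::DataLayout &DL = M.getDataLayout();
  llvm::EmitStrLen(Ptr, B, DL, TLI); // emitters that need type sizes take DL by reference
  llvm::EmitPutS(Str, B, TLI);       // emitters that never used it drop the parameter entirely
}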
2 changes: 0 additions & 2 deletions llvm/include/llvm/Transforms/Utils/Cloning.h
@@ -164,7 +164,6 @@ void CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix = "",
ClonedCodeInfo *CodeInfo = nullptr,
const DataLayout *DL = nullptr,
CloningDirector *Director = nullptr);


@@ -184,7 +183,6 @@ void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix = "",
ClonedCodeInfo *CodeInfo = nullptr,
const DataLayout *DL = nullptr,
Instruction *TheCall = nullptr);

/// InlineFunctionInfo - This class captures the data input to the
32 changes: 14 additions & 18 deletions llvm/include/llvm/Transforms/Utils/Local.h
@@ -88,7 +88,7 @@ bool RecursivelyDeleteDeadPHINode(PHINode *PN,
///
/// This returns true if it changed the code, note that it can delete
/// instructions in other blocks as well in this block.
bool SimplifyInstructionsInBlock(BasicBlock *BB, const DataLayout *TD = nullptr,
bool SimplifyInstructionsInBlock(BasicBlock *BB,
const TargetLibraryInfo *TLI = nullptr);

//===----------------------------------------------------------------------===//
@@ -106,8 +106,7 @@ bool SimplifyInstructionsInBlock(BasicBlock *BB, const DataLayout *TD = nullptr,
///
/// .. and delete the predecessor corresponding to the '1', this will attempt to
/// recursively fold the 'and' to 0.
void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
DataLayout *TD = nullptr);
void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred);

/// MergeBasicBlockIntoOnlyPred - BB is a block with one predecessor and its
/// predecessor is known to have one successor (BB!). Eliminate the edge
@@ -137,8 +136,7 @@ bool EliminateDuplicatePHINodes(BasicBlock *BB);
/// the basic block that was pointed to.
///
bool SimplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
unsigned BonusInstThreshold, const DataLayout *TD = nullptr,
AssumptionCache *AC = nullptr);
unsigned BonusInstThreshold, AssumptionCache *AC = nullptr);

/// FlattenCFG - This function is used to flatten a CFG. For
/// example, it uses parallel-and and parallel-or mode to collapse
@@ -150,8 +148,7 @@ bool FlattenCFG(BasicBlock *BB, AliasAnalysis *AA = nullptr);
/// and if a predecessor branches to us and one of our successors, fold the
/// setcc into the predecessor and use logical operations to pick the right
/// destination.
bool FoldBranchToCommonDest(BranchInst *BI, const DataLayout *DL = nullptr,
unsigned BonusInstThreshold = 1);
bool FoldBranchToCommonDest(BranchInst *BI, unsigned BonusInstThreshold = 1);

/// DemoteRegToStack - This function takes a virtual register computed by an
/// Instruction and replaces it with a slot in the stack frame, allocated via
@@ -173,30 +170,29 @@ AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = nullptr);
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
const DataLayout *TD = nullptr,
AssumptionCache *AC = nullptr,
const DataLayout &DL,
const Instruction *CxtI = nullptr,
AssumptionCache *AC = nullptr,
const DominatorTree *DT = nullptr);

/// getKnownAlignment - Try to infer an alignment for the specified pointer.
static inline unsigned getKnownAlignment(Value *V,
const DataLayout *TD = nullptr,
AssumptionCache *AC = nullptr,
static inline unsigned getKnownAlignment(Value *V, const DataLayout &DL,
const Instruction *CxtI = nullptr,
AssumptionCache *AC = nullptr,
const DominatorTree *DT = nullptr) {
return getOrEnforceKnownAlignment(V, 0, TD, AC, CxtI, DT);
return getOrEnforceKnownAlignment(V, 0, DL, CxtI, AC, DT);
}

/// EmitGEPOffset - Given a getelementptr instruction/constantexpr, emit the
/// code necessary to compute the offset from the base pointer (without adding
/// in the base pointer). Return the result as a signed integer of intptr size.
/// When NoAssumptions is true, no assumptions are made about index
/// computations not overflowing.
template<typename IRBuilderTy>
Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP,
template <typename IRBuilderTy>
Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
bool NoAssumptions = false) {
GEPOperator *GEPOp = cast<GEPOperator>(GEP);
Type *IntPtrTy = TD.getIntPtrType(GEP->getType());
Type *IntPtrTy = DL.getIntPtrType(GEP->getType());
Value *Result = Constant::getNullValue(IntPtrTy);

// If the GEP is inbounds, we know that none of the addressing operations will
@@ -211,7 +207,7 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP,
for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); i != e;
++i, ++GTI) {
Value *Op = *i;
uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
if (Constant *OpC = dyn_cast<Constant>(Op)) {
if (OpC->isZeroValue())
continue;
@@ -222,7 +218,7 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP,
OpC = OpC->getSplatValue();

uint64_t OpValue = cast<ConstantInt>(OpC)->getZExtValue();
Size = TD.getStructLayout(STy)->getElementOffset(OpValue);
Size = DL.getStructLayout(STy)->getElementOffset(OpValue);

if (Size)
Result = Builder->CreateAdd(Result, ConstantInt::get(IntPtrTy, Size),
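To show the reshuffled alignment helper and the renamed EmitGEPOffset parameter together, a short sketch; the wrapper and the names I, Ptr, GEP, AC, DT, and Builder are assumptions, not part of the patch.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/Local.h"

// Hypothetical caller: I is the context instruction, GEP an existing getelementptr User.
static void exampleLocalHelpers(llvm::Instruction *I, llvm::Value *Ptr, llvm::User *GEP,
                                llvm::AssumptionCache *AC, llvm::DominatorTree *DT,
                                llvm::IRBuilder<> &Builder) {
  const llvm::DataLayout &DL = I->getModule()->getDataLayout();
  unsigned Alignment = llvm::getKnownAlignment(Ptr, DL, I, AC, DT); // DL first, then CxtI, then AC
  llvm::Value *Off = llvm::EmitGEPOffset(&Builder, DL, GEP);        // template parameter renamed TD -> DL
  (void)Alignment;
  (void)Off;
}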
17 changes: 8 additions & 9 deletions llvm/include/llvm/Transforms/Utils/LoopUtils.h
@@ -52,7 +52,6 @@ BasicBlock *InsertPreheaderForLoop(Loop *L, Pass *P);
/// passed into it.
bool simplifyLoop(Loop *L, DominatorTree *DT, LoopInfo *LI, Pass *PP,
AliasAnalysis *AA = nullptr, ScalarEvolution *SE = nullptr,
const DataLayout *DL = nullptr,
AssumptionCache *AC = nullptr);

/// \brief Put loop into LCSSA form.
@@ -85,13 +84,13 @@ bool formLCSSARecursively(Loop &L, DominatorTree &DT, LoopInfo *LI,
/// dominated by the specified block, and that are in the current loop) in
/// reverse depth first order w.r.t the DominatorTree. This allows us to visit
/// uses before definitions, allowing us to sink a loop body in one pass without
/// iteration. Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree,
/// DataLayout, TargetLibraryInfo, Loop, AliasSet information for all
/// instructions of the loop and loop safety information as arguments.
/// It returns changed status.
/// iteration. Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree,
/// DataLayout, TargetLibraryInfo, Loop, AliasSet information for all
/// instructions of the loop and loop safety information as arguments.
/// It returns changed status.
bool sinkRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
const DataLayout *, TargetLibraryInfo *, Loop *,
AliasSetTracker *, LICMSafetyInfo *);
TargetLibraryInfo *, Loop *, AliasSetTracker *,
LICMSafetyInfo *);

/// \brief Walk the specified region of the CFG (defined by all blocks
/// dominated by the specified block, and that are in the current loop) in depth
@@ -101,8 +100,8 @@ bool sinkRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
/// TargetLibraryInfo, Loop, AliasSet information for all instructions of the
/// loop and loop safety information as arguments. It returns changed status.
bool hoistRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
const DataLayout *, TargetLibraryInfo *, Loop *,
AliasSetTracker *, LICMSafetyInfo *);
TargetLibraryInfo *, Loop *, AliasSetTracker *,
LICMSafetyInfo *);

/// \brief Try to promote memory values to scalars by sinking stores out of
/// the loop and moving loads to before the loop. We do this by looping over
7 changes: 3 additions & 4 deletions llvm/include/llvm/Transforms/Utils/SimplifyLibCalls.h
@@ -37,12 +37,11 @@ class Function;
/// is unknown) by passing true for OnlyLowerUnknownSize.
class FortifiedLibCallSimplifier {
private:
const DataLayout *DL;
const TargetLibraryInfo *TLI;
bool OnlyLowerUnknownSize;

public:
FortifiedLibCallSimplifier(const DataLayout *DL, const TargetLibraryInfo *TLI,
FortifiedLibCallSimplifier(const TargetLibraryInfo *TLI,
bool OnlyLowerUnknownSize = false);

/// \brief Take the given call instruction and return a more
@@ -72,7 +71,7 @@ class FortifiedLibCallSimplifier {
class LibCallSimplifier {
private:
FortifiedLibCallSimplifier FortifiedSimplifier;
const DataLayout *DL;
const DataLayout &DL;
const TargetLibraryInfo *TLI;
bool UnsafeFPShrink;
function_ref<void(Instruction *, Value *)> Replacer;
@@ -87,7 +86,7 @@ class LibCallSimplifier {
void replaceAllUsesWith(Instruction *I, Value *With);

public:
LibCallSimplifier(const DataLayout *TD, const TargetLibraryInfo *TLI,
LibCallSimplifier(const DataLayout &DL, const TargetLibraryInfo *TLI,
function_ref<void(Instruction *, Value *)> Replacer =
&replaceAllUsesWithDefault);

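Construction now requires the DataLayout up front. A minimal driver sketch; CI and TLI are placeholders, and optimizeCall is assumed to be the existing entry point of both simplifiers.

#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/SimplifyLibCalls.h"

// Hypothetical driver for a single call instruction.
static llvm::Value *exampleSimplify(llvm::CallInst *CI,
                                    const llvm::TargetLibraryInfo *TLI) {
  const llvm::DataLayout &DL = CI->getModule()->getDataLayout();
  llvm::LibCallSimplifier Simplifier(DL, TLI);     // DataLayout by reference, no null state
  llvm::FortifiedLibCallSimplifier Fortified(TLI); // the fortified variant dropped it entirely
  if (llvm::Value *With = Simplifier.optimizeCall(CI))
    return With;
  return Fortified.optimizeCall(CI);
}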
5 changes: 3 additions & 2 deletions llvm/lib/Analysis/AliasAnalysis.cpp
@@ -407,9 +407,10 @@ AliasAnalysis::ModRefResult
AliasAnalysis::callCapturesBefore(const Instruction *I,
const AliasAnalysis::Location &MemLoc,
DominatorTree *DT) {
if (!DT || !DL) return AliasAnalysis::ModRef;
if (!DT)
return AliasAnalysis::ModRef;

const Value *Object = GetUnderlyingObject(MemLoc.Ptr, DL);
const Value *Object = GetUnderlyingObject(MemLoc.Ptr, *DL);
if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
isa<Constant>(Object))
return AliasAnalysis::ModRef;
50 changes: 20 additions & 30 deletions llvm/lib/Analysis/BasicAliasAnalysis.cpp
@@ -103,7 +103,7 @@ static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
const TargetLibraryInfo &TLI,
bool RoundToAlign = false) {
uint64_t Size;
if (getObjectSize(V, Size, &DL, &TLI, RoundToAlign))
if (getObjectSize(V, Size, DL, &TLI, RoundToAlign))
return Size;
return AliasAnalysis::UnknownSize;
}
@@ -221,7 +221,7 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
case Instruction::Or:
// X|C == X+C if all the bits in C are unset in X. Otherwise we can't
// analyze it.
if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), &DL, 0, AC,
if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
BOp, DT))
break;
// FALL THROUGH.
@@ -292,7 +292,7 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
static const Value *
DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
SmallVectorImpl<VariableGEPIndex> &VarIndices,
bool &MaxLookupReached, const DataLayout *DL,
bool &MaxLookupReached, const DataLayout &DL,
AssumptionCache *AC, DominatorTree *DT) {
// Limit recursion depth to limit compile time in crazy cases.
unsigned MaxLookup = MaxLookupSearchDepth;
@@ -341,16 +341,6 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
if (!GEPOp->getOperand(0)->getType()->getPointerElementType()->isSized())
return V;

// If we are lacking DataLayout information, we can't compute the offets of
// elements computed by GEPs. However, we can handle bitcast equivalent
// GEPs.
if (!DL) {
if (!GEPOp->hasAllZeroIndices())
return V;
V = GEPOp->getOperand(0);
continue;
}

unsigned AS = GEPOp->getPointerAddressSpace();
// Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
gep_type_iterator GTI = gep_type_begin(GEPOp);
@@ -363,30 +353,30 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
if (FieldNo == 0) continue;

BaseOffs += DL->getStructLayout(STy)->getElementOffset(FieldNo);
BaseOffs += DL.getStructLayout(STy)->getElementOffset(FieldNo);
continue;
}

// For an array/pointer, add the element offset, explicitly scaled.
if (ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
if (CIdx->isZero()) continue;
BaseOffs += DL->getTypeAllocSize(*GTI)*CIdx->getSExtValue();
BaseOffs += DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
continue;
}

uint64_t Scale = DL->getTypeAllocSize(*GTI);
uint64_t Scale = DL.getTypeAllocSize(*GTI);
ExtensionKind Extension = EK_NotExtended;

// If the integer type is smaller than the pointer size, it is implicitly
// sign extended to pointer size.
unsigned Width = Index->getType()->getIntegerBitWidth();
if (DL->getPointerSizeInBits(AS) > Width)
if (DL.getPointerSizeInBits(AS) > Width)
Extension = EK_SignExt;

// Use GetLinearExpression to decompose the index into a C1*V+C2 form.
APInt IndexScale(Width, 0), IndexOffset(Width, 0);
Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension,
*DL, 0, AC, DT);
Index = GetLinearExpression(Index, IndexScale, IndexOffset, Extension, DL,
0, AC, DT);

// The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
// This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
@@ -408,7 +398,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,

// Make sure that we have a scale that makes sense for this target's
// pointer size.
if (unsigned ShiftBits = 64 - DL->getPointerSizeInBits(AS)) {
if (unsigned ShiftBits = 64 - DL.getPointerSizeInBits(AS)) {
Scale <<= ShiftBits;
Scale = (int64_t)Scale >> ShiftBits;
}
@@ -610,7 +600,7 @@ BasicAliasAnalysis::pointsToConstantMemory(const Location &Loc, bool OrLocal) {
SmallVector<const Value *, 16> Worklist;
Worklist.push_back(Loc.Ptr);
do {
const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), *DL);
if (!Visited.insert(V).second) {
Visited.clear();
return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
@@ -828,7 +818,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
"AliasAnalysis query involving multiple functions!");

const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);
const Value *Object = GetUnderlyingObject(Loc.Ptr, *DL);

// If this is a tail call and Loc.Ptr points to a stack location, we know that
// the tail call cannot access or modify the local stack.
@@ -1045,10 +1035,10 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
const Value *GEP2BasePtr =
DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
GEP2MaxLookupReached, DL, AC2, DT);
GEP2MaxLookupReached, *DL, AC2, DT);
const Value *GEP1BasePtr =
DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
GEP1MaxLookupReached, DL, AC1, DT);
GEP1MaxLookupReached, *DL, AC1, DT);
// DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
@@ -1077,14 +1067,14 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// about the relation of the resulting pointer.
const Value *GEP1BasePtr =
DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
GEP1MaxLookupReached, DL, AC1, DT);
GEP1MaxLookupReached, *DL, AC1, DT);

int64_t GEP2BaseOffset;
bool GEP2MaxLookupReached;
SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
const Value *GEP2BasePtr =
DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
GEP2MaxLookupReached, DL, AC2, DT);
GEP2MaxLookupReached, *DL, AC2, DT);

// DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no DataLayout.
@@ -1134,7 +1124,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,

const Value *GEP1BasePtr =
DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
GEP1MaxLookupReached, DL, AC1, DT);
GEP1MaxLookupReached, *DL, AC1, DT);

// DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no DataLayout.
@@ -1203,7 +1193,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
const Value *V = GEP1VariableIndices[i].V;

bool SignKnownZero, SignKnownOne;
ComputeSignBit(const_cast<Value *>(V), SignKnownZero, SignKnownOne, DL,
ComputeSignBit(const_cast<Value *>(V), SignKnownZero, SignKnownOne, *DL,
0, AC1, nullptr, DT);

// Zero-extension widens the variable, and so forces the sign
@@ -1412,8 +1402,8 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
return NoAlias; // Scalars cannot alias each other

// Figure out what objects these things are pointing to if we can.
const Value *O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);
const Value *O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);
const Value *O1 = GetUnderlyingObject(V1, *DL, MaxLookupSearchDepth);
const Value *O2 = GetUnderlyingObject(V2, *DL, MaxLookupSearchDepth);

// Null values in the default address space don't point to any object, so they
// don't alias any other pointer.
249 changes: 116 additions & 133 deletions llvm/lib/Analysis/ConstantFolding.cpp

Large diffs are not rendered by default.

22 changes: 11 additions & 11 deletions llvm/lib/Analysis/DependenceAnalysis.cpp
@@ -60,6 +60,7 @@
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
@@ -624,14 +625,12 @@ void Dependence::dump(raw_ostream &OS) const {
OS << "!\n";
}



static
AliasAnalysis::AliasResult underlyingObjectsAlias(AliasAnalysis *AA,
const Value *A,
const Value *B) {
const Value *AObj = GetUnderlyingObject(A);
const Value *BObj = GetUnderlyingObject(B);
static AliasAnalysis::AliasResult underlyingObjectsAlias(AliasAnalysis *AA,
const DataLayout &DL,
const Value *A,
const Value *B) {
const Value *AObj = GetUnderlyingObject(A, DL);
const Value *BObj = GetUnderlyingObject(B, DL);
return AA->alias(AObj, AA->getTypeStoreSize(AObj->getType()),
BObj, AA->getTypeStoreSize(BObj->getType()));
}
@@ -3313,7 +3312,8 @@ DependenceAnalysis::depends(Instruction *Src, Instruction *Dst,
Value *SrcPtr = getPointerOperand(Src);
Value *DstPtr = getPointerOperand(Dst);

switch (underlyingObjectsAlias(AA, DstPtr, SrcPtr)) {
switch (underlyingObjectsAlias(AA, F->getParent()->getDataLayout(), DstPtr,
SrcPtr)) {
case AliasAnalysis::MayAlias:
case AliasAnalysis::PartialAlias:
// cannot analyse objects if we don't understand their aliasing.
@@ -3757,8 +3757,8 @@ const SCEV *DependenceAnalysis::getSplitIteration(const Dependence &Dep,
assert(isLoadOrStore(Dst));
Value *SrcPtr = getPointerOperand(Src);
Value *DstPtr = getPointerOperand(Dst);
assert(underlyingObjectsAlias(AA, DstPtr, SrcPtr) ==
AliasAnalysis::MustAlias);
assert(underlyingObjectsAlias(AA, F->getParent()->getDataLayout(), DstPtr,
SrcPtr) == AliasAnalysis::MustAlias);

// establish loop nesting levels
establishNestingLevels(Src, Dst);
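The .cpp changes in this patch repeat one idiom: instead of consulting a cached, possibly-null DataLayout pointer, each function pulls the layout from IR it already holds. A condensed sketch of that idiom; the wrapper and the names F and I are placeholders introduced here.

#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"

// Hypothetical illustration of the retrieval pattern used throughout the .cpp files above.
static const llvm::DataLayout &exampleGetLayout(llvm::Function &F, llvm::Instruction *I) {
  const llvm::DataLayout &FromFunction = F.getParent()->getDataLayout();     // via the Module
  const llvm::DataLayout &FromInstruction = I->getModule()->getDataLayout(); // via the Instruction
  (void)FromInstruction;
  return FromFunction; // no null checks needed: the layout is mandatory now
}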
10 changes: 6 additions & 4 deletions llvm/lib/Analysis/IPA/GlobalsModRef.cpp
@@ -322,7 +322,8 @@ bool GlobalsModRef::AnalyzeIndirectGlobalMemory(GlobalValue *GV) {
continue;

// Check the value being stored.
Value *Ptr = GetUnderlyingObject(SI->getOperand(0));
Value *Ptr = GetUnderlyingObject(SI->getOperand(0),
GV->getParent()->getDataLayout());

if (!isAllocLikeFn(Ptr, TLI))
return false; // Too hard to analyze.
@@ -481,8 +482,8 @@ AliasAnalysis::AliasResult
GlobalsModRef::alias(const Location &LocA,
const Location &LocB) {
// Get the base object these pointers point to.
const Value *UV1 = GetUnderlyingObject(LocA.Ptr);
const Value *UV2 = GetUnderlyingObject(LocB.Ptr);
const Value *UV1 = GetUnderlyingObject(LocA.Ptr, *DL);
const Value *UV2 = GetUnderlyingObject(LocB.Ptr, *DL);

// If either of the underlying values is a global, they may be non-addr-taken
// globals, which we can answer queries about.
@@ -540,8 +541,9 @@ GlobalsModRef::getModRefInfo(ImmutableCallSite CS,

// If we are asking for mod/ref info of a direct call with a pointer to a
// global we are tracking, return information if we have it.
const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
if (const GlobalValue *GV =
dyn_cast<GlobalValue>(GetUnderlyingObject(Loc.Ptr)))
dyn_cast<GlobalValue>(GetUnderlyingObject(Loc.Ptr, DL)))
if (GV->hasLocalLinkage())
if (const Function *F = CS.getCalledFunction())
if (NonAddressTakenGlobals.count(GV))
53 changes: 26 additions & 27 deletions llvm/lib/Analysis/IPA/InlineCost.cpp
@@ -45,9 +45,6 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
typedef InstVisitor<CallAnalyzer, bool> Base;
friend class InstVisitor<CallAnalyzer, bool>;

// DataLayout if available, or null.
const DataLayout *const DL;

/// The TargetTransformInfo available for this compilation.
const TargetTransformInfo &TTI;

@@ -145,9 +142,9 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
bool visitUnreachableInst(UnreachableInst &I);

public:
CallAnalyzer(const DataLayout *DL, const TargetTransformInfo &TTI,
AssumptionCacheTracker *ACT, Function &Callee, int Threshold)
: DL(DL), TTI(TTI), ACT(ACT), F(Callee), Threshold(Threshold), Cost(0),
CallAnalyzer(const TargetTransformInfo &TTI, AssumptionCacheTracker *ACT,
Function &Callee, int Threshold)
: TTI(TTI), ACT(ACT), F(Callee), Threshold(Threshold), Cost(0),
IsCallerRecursive(false), IsRecursiveCall(false),
ExposesReturnsTwice(false), HasDynamicAlloca(false),
ContainsNoDuplicateCall(false), HasReturn(false), HasIndirectBr(false),
@@ -244,10 +241,8 @@ bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) {
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
if (!DL)
return false;

unsigned IntPtrWidth = DL->getPointerSizeInBits();
const DataLayout &DL = F.getParent()->getDataLayout();
unsigned IntPtrWidth = DL.getPointerSizeInBits();
assert(IntPtrWidth == Offset.getBitWidth());

for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
@@ -263,12 +258,12 @@ bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
// Handle a struct index, which adds its field offset to the pointer.
if (StructType *STy = dyn_cast<StructType>(*GTI)) {
unsigned ElementIdx = OpC->getZExtValue();
const StructLayout *SL = DL->getStructLayout(STy);
const StructLayout *SL = DL.getStructLayout(STy);
Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
continue;
}

APInt TypeSize(IntPtrWidth, DL->getTypeAllocSize(GTI.getIndexedType()));
APInt TypeSize(IntPtrWidth, DL.getTypeAllocSize(GTI.getIndexedType()));
Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
}
return true;
@@ -289,9 +284,9 @@ bool CallAnalyzer::visitAlloca(AllocaInst &I) {

// Accumulate the allocated size.
if (I.isStaticAlloca()) {
const DataLayout &DL = F.getParent()->getDataLayout();
Type *Ty = I.getAllocatedType();
AllocatedSize += (DL ? DL->getTypeAllocSize(Ty) :
Ty->getPrimitiveSizeInBits());
AllocatedSize += DL.getTypeAllocSize(Ty);
}

// We will happily inline static alloca instructions.
@@ -327,7 +322,7 @@ bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {

// Try to fold GEPs of constant-offset call site argument pointers. This
// requires target data and inbounds GEPs.
if (DL && I.isInBounds()) {
if (I.isInBounds()) {
// Check if we have a base + offset for the pointer.
Value *Ptr = I.getPointerOperand();
std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
@@ -409,7 +404,7 @@ bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
// Track base/offset pairs when converted to a plain integer provided the
// integer is large enough to represent the pointer.
unsigned IntegerSize = I.getType()->getScalarSizeInBits();
const DataLayout &DL = I.getModule()->getDataLayout();
const DataLayout &DL = F.getParent()->getDataLayout();
if (IntegerSize >= DL.getPointerSizeInBits()) {
std::pair<Value *, APInt> BaseAndOffset
= ConstantOffsetPtrs.lookup(I.getOperand(0));
@@ -447,7 +442,7 @@ bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
// modifications provided the integer is not too large.
Value *Op = I.getOperand(0);
unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
const DataLayout &DL = I.getModule()->getDataLayout();
const DataLayout &DL = F.getParent()->getDataLayout();
if (IntegerSize <= DL.getPointerSizeInBits()) {
std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
if (BaseAndOffset.first)
@@ -485,12 +480,14 @@ bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
Constant *COp = dyn_cast<Constant>(Operand);
if (!COp)
COp = SimplifiedValues.lookup(Operand);
if (COp)
if (COp) {
const DataLayout &DL = F.getParent()->getDataLayout();
if (Constant *C = ConstantFoldInstOperands(I.getOpcode(), I.getType(),
COp, DL)) {
SimplifiedValues[&I] = C;
return true;
}
}

// Disable any SROA on the argument to arbitrary unary operators.
disableSROA(Operand);
@@ -595,6 +592,7 @@ bool CallAnalyzer::visitSub(BinaryOperator &I) {

bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
const DataLayout &DL = F.getParent()->getDataLayout();
if (!isa<Constant>(LHS))
if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
LHS = SimpleLHS;
@@ -788,7 +786,7 @@ bool CallAnalyzer::visitCallSite(CallSite CS) {
// during devirtualization and so we want to give it a hefty bonus for
// inlining, but cap that bonus in the event that inlining wouldn't pan
// out. Pretend to inline the function, with a custom threshold.
CallAnalyzer CA(DL, TTI, ACT, *F, InlineConstants::IndirectCallThreshold);
CallAnalyzer CA(TTI, ACT, *F, InlineConstants::IndirectCallThreshold);
if (CA.analyzeCall(CS)) {
// We were able to inline the indirect call! Subtract the cost from the
// bonus we want to apply, but don't go below zero.
@@ -976,10 +974,11 @@ bool CallAnalyzer::analyzeBlock(BasicBlock *BB,
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
if (!DL || !V->getType()->isPointerTy())
if (!V->getType()->isPointerTy())
return nullptr;

unsigned IntPtrWidth = DL->getPointerSizeInBits();
const DataLayout &DL = F.getParent()->getDataLayout();
unsigned IntPtrWidth = DL.getPointerSizeInBits();
APInt Offset = APInt::getNullValue(IntPtrWidth);

// Even though we don't look through PHI nodes, we could be called on an
@@ -1003,7 +1002,7 @@ ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
assert(V->getType()->isPointerTy() && "Unexpected operand type!");
} while (Visited.insert(V).second);

Type *IntPtrTy = DL->getIntPtrType(V->getContext());
Type *IntPtrTy = DL.getIntPtrType(V->getContext());
return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}

@@ -1034,16 +1033,17 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
assert(NumVectorInstructions == 0);
FiftyPercentVectorBonus = Threshold;
TenPercentVectorBonus = Threshold / 2;
const DataLayout &DL = F.getParent()->getDataLayout();

// Give out bonuses per argument, as the instructions setting them up will
// be gone after inlining.
for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
if (DL && CS.isByValArgument(I)) {
if (CS.isByValArgument(I)) {
// We approximate the number of loads and stores needed by dividing the
// size of the byval type by the target's pointer size.
PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
unsigned TypeSize = DL->getTypeSizeInBits(PTy->getElementType());
unsigned PointerSize = DL->getPointerSizeInBits();
unsigned TypeSize = DL.getTypeSizeInBits(PTy->getElementType());
unsigned PointerSize = DL.getPointerSizeInBits();
// Ceiling division.
unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;

@@ -1333,8 +1333,7 @@ InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, Function *Callee,
DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
<< "...\n");

CallAnalyzer CA(&Callee->getParent()->getDataLayout(), TTIWP->getTTI(*Callee),
ACT, *Callee, Threshold);
CallAnalyzer CA(TTIWP->getTTI(*Callee), ACT, *Callee, Threshold);
bool ShouldInline = CA.analyzeCall(CS);

DEBUG(CA.dump());
7 changes: 4 additions & 3 deletions llvm/lib/Analysis/IVUsers.cpp
@@ -114,6 +114,8 @@ static bool isSimplifiedLoopNest(BasicBlock *BB, const DominatorTree *DT,
/// return true. Otherwise, return false.
bool IVUsers::AddUsersImpl(Instruction *I,
SmallPtrSetImpl<Loop*> &SimpleLoopNests) {
const DataLayout &DL = I->getModule()->getDataLayout();

// Add this IV user to the Processed set before returning false to ensure that
// all IV users are members of the set. See IVUsers::isIVUserOrOperand.
if (!Processed.insert(I).second)
@@ -125,14 +127,14 @@ bool IVUsers::AddUsersImpl(Instruction *I,
// IVUsers is used by LSR which assumes that all SCEV expressions are safe to
// pass to SCEVExpander. Expressions are not safe to expand if they represent
// operations that are not safe to speculate, namely integer division.
if (!isa<PHINode>(I) && !isSafeToSpeculativelyExecute(I, DL))
if (!isa<PHINode>(I) && !isSafeToSpeculativelyExecute(I))
return false;

// LSR is not APInt clean, do not touch integers bigger than 64-bits.
// Also avoid creating IVs of non-native types. For example, we don't want a
// 64-bit IV in 32-bit code just because the loop has one 64-bit cast.
uint64_t Width = SE->getTypeSizeInBits(I->getType());
if (Width > 64 || (DL && !DL->isLegalInteger(Width)))
if (Width > 64 || !DL.isLegalInteger(Width))
return false;

// Get the symbolic expression for this instruction.
@@ -254,7 +256,6 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) {
LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = &getAnalysis<ScalarEvolution>();
DL = &L->getHeader()->getModule()->getDataLayout();

// Find all uses of induction variables in this loop, and categorize
// them by stride. Start by finding all of the PHI nodes in the header for
111 changes: 53 additions & 58 deletions llvm/lib/Analysis/InstructionSimplify.cpp

Large diffs are not rendered by default.

102 changes: 55 additions & 47 deletions llvm/lib/Analysis/LazyValueInfo.cpp
@@ -191,7 +191,7 @@ class LVILatticeVal {

/// Merge the specified lattice value into this one, updating this
/// one and returning true if anything changed.
bool mergeIn(const LVILatticeVal &RHS) {
bool mergeIn(const LVILatticeVal &RHS, const DataLayout &DL) {
if (RHS.isUndefined() || isOverdefined()) return false;
if (RHS.isOverdefined()) return markOverdefined();

@@ -215,11 +215,9 @@

// Unless we can prove that the two Constants are different, we must
// move to overdefined.
// FIXME: use DataLayout/TargetLibraryInfo for smarter constant folding.
if (ConstantInt *Res = dyn_cast<ConstantInt>(
ConstantFoldCompareInstOperands(CmpInst::ICMP_NE,
getConstant(),
RHS.getNotConstant())))
if (ConstantInt *Res =
dyn_cast<ConstantInt>(ConstantFoldCompareInstOperands(
CmpInst::ICMP_NE, getConstant(), RHS.getNotConstant(), DL)))
if (Res->isOne())
return markNotConstant(RHS.getNotConstant());

@@ -241,11 +239,9 @@

// Unless we can prove that the two Constants are different, we must
// move to overdefined.
// FIXME: use DataLayout/TargetLibraryInfo for smarter constant folding.
if (ConstantInt *Res = dyn_cast<ConstantInt>(
ConstantFoldCompareInstOperands(CmpInst::ICMP_NE,
getNotConstant(),
RHS.getConstant())))
if (ConstantInt *Res =
dyn_cast<ConstantInt>(ConstantFoldCompareInstOperands(
CmpInst::ICMP_NE, getNotConstant(), RHS.getConstant(), DL)))
if (Res->isOne())
return false;

@@ -353,13 +349,10 @@ namespace {
return true;
}

/// A pointer to the cache of @llvm.assume calls.
AssumptionCache *AC;
/// An optional DL pointer.
const DataLayout *DL;
/// An optional DT pointer.
DominatorTree *DT;

AssumptionCache *AC; ///< A pointer to the cache of @llvm.assume calls.
const DataLayout &DL; ///< A mandatory DataLayout
DominatorTree *DT; ///< An optional DT pointer.

friend struct LVIValueHandle;

void insertResult(Value *Val, BasicBlock *BB, const LVILatticeVal &Result) {
@@ -425,7 +418,7 @@ namespace {
OverDefinedCache.clear();
}

LazyValueInfoCache(AssumptionCache *AC, const DataLayout *DL = nullptr,
LazyValueInfoCache(AssumptionCache *AC, const DataLayout &DL,
DominatorTree *DT = nullptr)
: AC(AC), DL(DL), DT(DT) {}
};
@@ -578,11 +571,13 @@ bool LazyValueInfoCache::solveBlockValue(Value *Val, BasicBlock *BB) {
static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
if (LoadInst *L = dyn_cast<LoadInst>(I)) {
return L->getPointerAddressSpace() == 0 &&
GetUnderlyingObject(L->getPointerOperand()) == Ptr;
GetUnderlyingObject(L->getPointerOperand(),
L->getModule()->getDataLayout()) == Ptr;
}
if (StoreInst *S = dyn_cast<StoreInst>(I)) {
return S->getPointerAddressSpace() == 0 &&
GetUnderlyingObject(S->getPointerOperand()) == Ptr;
GetUnderlyingObject(S->getPointerOperand(),
S->getModule()->getDataLayout()) == Ptr;
}
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
if (MI->isVolatile()) return false;
@@ -592,11 +587,13 @@ static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
if (!Len || Len->isZero()) return false;

if (MI->getDestAddressSpace() == 0)
if (GetUnderlyingObject(MI->getRawDest()) == Ptr)
if (GetUnderlyingObject(MI->getRawDest(),
MI->getModule()->getDataLayout()) == Ptr)
return true;
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
if (MTI->getSourceAddressSpace() == 0)
if (GetUnderlyingObject(MTI->getRawSource()) == Ptr)
if (GetUnderlyingObject(MTI->getRawSource(),
MTI->getModule()->getDataLayout()) == Ptr)
return true;
}
return false;
@@ -613,10 +610,11 @@ bool LazyValueInfoCache::solveBlockValueNonLocal(LVILatticeVal &BBLV,
if (isKnownNonNull(Val)) {
NotNull = true;
} else {
Value *UnderlyingVal = GetUnderlyingObject(Val);
const DataLayout &DL = BB->getModule()->getDataLayout();
Value *UnderlyingVal = GetUnderlyingObject(Val, DL);
// If 'GetUnderlyingObject' didn't converge, skip it. It won't converge
// inside InstructionDereferencesPointer either.
if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, nullptr, 1)) {
if (UnderlyingVal == GetUnderlyingObject(UnderlyingVal, DL, 1)) {
for (Instruction &I : *BB) {
if (InstructionDereferencesPointer(&I, UnderlyingVal)) {
NotNull = true;
@@ -650,7 +648,7 @@ bool LazyValueInfoCache::solveBlockValueNonLocal(LVILatticeVal &BBLV,
if (EdgesMissing)
continue;

Result.mergeIn(EdgeResult);
Result.mergeIn(EdgeResult, DL);

// If we hit overdefined, exit early. The BlockVals entry is already set
// to overdefined.
@@ -695,7 +693,7 @@ bool LazyValueInfoCache::solveBlockValuePHINode(LVILatticeVal &BBLV,
if (EdgesMissing)
continue;

Result.mergeIn(EdgeResult);
Result.mergeIn(EdgeResult, DL);

// If we hit overdefined, exit early. The BlockVals entry is already set
// to overdefined.
@@ -734,7 +732,7 @@ void LazyValueInfoCache::mergeAssumeBlockValueConstantRange(Value *Val,
if (!AssumeVH)
continue;
auto *I = cast<CallInst>(AssumeVH);
if (!isValidAssumeForContext(I, BBI, DL, DT))
if (!isValidAssumeForContext(I, BBI, DT))
continue;

Value *C = I->getArgOperand(0);
@@ -744,7 +742,7 @@
if (BBLV.isOverdefined())
BBLV = Result;
else
BBLV.mergeIn(Result);
BBLV.mergeIn(Result, DL);
}
}
}
@@ -1103,26 +1101,27 @@ void LazyValueInfoCache::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,

/// This lazily constructs the LazyValueInfoCache.
static LazyValueInfoCache &getCache(void *&PImpl, AssumptionCache *AC,
const DataLayout *DL = nullptr,
const DataLayout *DL,
DominatorTree *DT = nullptr) {
if (!PImpl)
PImpl = new LazyValueInfoCache(AC, DL, DT);
if (!PImpl) {
assert(DL && "getCache() called with a null DataLayout");
PImpl = new LazyValueInfoCache(AC, *DL, DT);
}
return *static_cast<LazyValueInfoCache*>(PImpl);
}

bool LazyValueInfo::runOnFunction(Function &F) {
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
const DataLayout &DL = F.getParent()->getDataLayout();

DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : nullptr;

DL = &F.getParent()->getDataLayout();

TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

if (PImpl)
getCache(PImpl, AC, DL, DT).clear();
getCache(PImpl, AC, &DL, DT).clear();

// Fully lazy.
return false;
@@ -1137,15 +1136,16 @@ void LazyValueInfo::getAnalysisUsage(AnalysisUsage &AU) const {
void LazyValueInfo::releaseMemory() {
// If the cache was allocated, free it.
if (PImpl) {
delete &getCache(PImpl, AC);
delete &getCache(PImpl, AC, nullptr);
PImpl = nullptr;
}
}

Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB,
Instruction *CxtI) {
const DataLayout &DL = BB->getModule()->getDataLayout();
LVILatticeVal Result =
getCache(PImpl, AC, DL, DT).getValueInBlock(V, BB, CxtI);
getCache(PImpl, AC, &DL, DT).getValueInBlock(V, BB, CxtI);

if (Result.isConstant())
return Result.getConstant();
@@ -1162,8 +1162,9 @@ Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB,
Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
BasicBlock *ToBB,
Instruction *CxtI) {
const DataLayout &DL = FromBB->getModule()->getDataLayout();
LVILatticeVal Result =
getCache(PImpl, AC, DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
getCache(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);

if (Result.isConstant())
return Result.getConstant();
@@ -1175,9 +1176,10 @@ Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
return nullptr;
}

static LazyValueInfo::Tristate
getPredicateResult(unsigned Pred, Constant *C, LVILatticeVal &Result,
const DataLayout *DL, TargetLibraryInfo *TLI) {
static LazyValueInfo::Tristate getPredicateResult(unsigned Pred, Constant *C,
LVILatticeVal &Result,
const DataLayout &DL,
TargetLibraryInfo *TLI) {

// If we know the value is a constant, evaluate the conditional.
Constant *Res = nullptr;
@@ -1248,27 +1250,33 @@ LazyValueInfo::
LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
BasicBlock *FromBB, BasicBlock *ToBB,
Instruction *CxtI) {
const DataLayout &DL = FromBB->getModule()->getDataLayout();
LVILatticeVal Result =
getCache(PImpl, AC, DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);
getCache(PImpl, AC, &DL, DT).getValueOnEdge(V, FromBB, ToBB, CxtI);

return getPredicateResult(Pred, C, Result, DL, TLI);
}

LazyValueInfo::Tristate
LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
Instruction *CxtI) {
LVILatticeVal Result = getCache(PImpl, AC, DL, DT).getValueAt(V, CxtI);
const DataLayout &DL = CxtI->getModule()->getDataLayout();
LVILatticeVal Result = getCache(PImpl, AC, &DL, DT).getValueAt(V, CxtI);

return getPredicateResult(Pred, C, Result, DL, TLI);
}

void LazyValueInfo::threadEdge(BasicBlock *PredBB, BasicBlock *OldSucc,
BasicBlock *NewSucc) {
if (PImpl)
getCache(PImpl, AC, DL, DT).threadEdge(PredBB, OldSucc, NewSucc);
if (PImpl) {
const DataLayout &DL = PredBB->getModule()->getDataLayout();
getCache(PImpl, AC, &DL, DT).threadEdge(PredBB, OldSucc, NewSucc);
}
}

void LazyValueInfo::eraseBlock(BasicBlock *BB) {
if (PImpl)
getCache(PImpl, AC, DL, DT).eraseBlock(BB);
if (PImpl) {
const DataLayout &DL = BB->getModule()->getDataLayout();
getCache(PImpl, AC, &DL, DT).eraseBlock(BB);
}
}
113 changes: 58 additions & 55 deletions llvm/lib/Analysis/Lint.cpp
@@ -98,16 +98,15 @@ namespace {
void visitInsertElementInst(InsertElementInst &I);
void visitUnreachableInst(UnreachableInst &I);

Value *findValue(Value *V, bool OffsetOk) const;
Value *findValueImpl(Value *V, bool OffsetOk,
Value *findValue(Value *V, const DataLayout &DL, bool OffsetOk) const;
Value *findValueImpl(Value *V, const DataLayout &DL, bool OffsetOk,
SmallPtrSetImpl<Value *> &Visited) const;

public:
Module *Mod;
AliasAnalysis *AA;
AssumptionCache *AC;
DominatorTree *DT;
const DataLayout *DL;
TargetLibraryInfo *TLI;

std::string Messages;
@@ -175,7 +174,6 @@ bool Lint::runOnFunction(Function &F) {
AA = &getAnalysis<AliasAnalysis>();
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
DL = &F.getParent()->getDataLayout();
TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
visit(F);
dbgs() << MessagesStr.str();
@@ -195,11 +193,13 @@ void Lint::visitFunction(Function &F) {
void Lint::visitCallSite(CallSite CS) {
Instruction &I = *CS.getInstruction();
Value *Callee = CS.getCalledValue();
const DataLayout &DL = CS->getModule()->getDataLayout();

visitMemoryReference(I, Callee, AliasAnalysis::UnknownSize,
0, nullptr, MemRef::Callee);

if (Function *F = dyn_cast<Function>(findValue(Callee, /*OffsetOk=*/false))) {
if (Function *F = dyn_cast<Function>(findValue(Callee, DL,
/*OffsetOk=*/false))) {
Assert(CS.getCallingConv() == F->getCallingConv(),
"Undefined behavior: Caller and callee calling convention differ",
&I);
@@ -248,8 +248,8 @@ void Lint::visitCallSite(CallSite CS) {
Type *Ty =
cast<PointerType>(Formal->getType())->getElementType();
visitMemoryReference(I, Actual, AA->getTypeStoreSize(Ty),
DL ? DL->getABITypeAlignment(Ty) : 0,
Ty, MemRef::Read | MemRef::Write);
DL.getABITypeAlignment(Ty), Ty,
MemRef::Read | MemRef::Write);
}
}
}
@@ -258,9 +258,10 @@ void Lint::visitCallSite(CallSite CS) {
if (CS.isCall() && cast<CallInst>(CS.getInstruction())->isTailCall())
for (CallSite::arg_iterator AI = CS.arg_begin(), AE = CS.arg_end();
AI != AE; ++AI) {
Value *Obj = findValue(*AI, /*OffsetOk=*/true);
Value *Obj = findValue(*AI, DL, /*OffsetOk=*/true);
Assert(!isa<AllocaInst>(Obj),
"Undefined behavior: Call with \"tail\" keyword references alloca",
"Undefined behavior: Call with \"tail\" keyword references "
"alloca",
&I);
}

@@ -286,8 +287,8 @@ void Lint::visitCallSite(CallSite CS) {
// overlap is not distinguished from the case where nothing is known.
uint64_t Size = 0;
if (const ConstantInt *Len =
dyn_cast<ConstantInt>(findValue(MCI->getLength(),
/*OffsetOk=*/false)))
dyn_cast<ConstantInt>(findValue(MCI->getLength(), DL,
/*OffsetOk=*/false)))
if (Len->getValue().isIntN(32))
Size = Len->getValue().getZExtValue();
Assert(AA->alias(MCI->getSource(), Size, MCI->getDest(), Size) !=
@@ -365,7 +366,8 @@ void Lint::visitReturnInst(ReturnInst &I) {
"Unusual: Return statement in function with noreturn attribute", &I);

if (Value *V = I.getReturnValue()) {
Value *Obj = findValue(V, /*OffsetOk=*/true);
Value *Obj =
findValue(V, F->getParent()->getDataLayout(), /*OffsetOk=*/true);
Assert(!isa<AllocaInst>(Obj), "Unusual: Returning alloca value", &I);
}
}
@@ -380,7 +382,8 @@ void Lint::visitMemoryReference(Instruction &I,
if (Size == 0)
return;

Value *UnderlyingObject = findValue(Ptr, /*OffsetOk=*/true);
Value *UnderlyingObject =
findValue(Ptr, I.getModule()->getDataLayout(), /*OffsetOk=*/true);
Assert(!isa<ConstantPointerNull>(UnderlyingObject),
"Undefined behavior: Null pointer dereference", &I);
Assert(!isa<UndefValue>(UnderlyingObject),
@@ -419,6 +422,7 @@ void Lint::visitMemoryReference(Instruction &I,
// Check for buffer overflows and misalignment.
// Only handles memory references that read/write something simple like an
// alloca instruction or a global variable.
auto &DL = I.getModule()->getDataLayout();
int64_t Offset = 0;
if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, DL)) {
// OK, so the access is to a constant offset from Ptr. Check that Ptr is
@@ -429,21 +433,21 @@ void Lint::visitMemoryReference(Instruction &I,

if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
Type *ATy = AI->getAllocatedType();
if (DL && !AI->isArrayAllocation() && ATy->isSized())
BaseSize = DL->getTypeAllocSize(ATy);
if (!AI->isArrayAllocation() && ATy->isSized())
BaseSize = DL.getTypeAllocSize(ATy);
BaseAlign = AI->getAlignment();
if (DL && BaseAlign == 0 && ATy->isSized())
BaseAlign = DL->getABITypeAlignment(ATy);
if (BaseAlign == 0 && ATy->isSized())
BaseAlign = DL.getABITypeAlignment(ATy);
} else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
// If the global may be defined differently in another compilation unit
// then don't warn about funky memory accesses.
if (GV->hasDefinitiveInitializer()) {
Type *GTy = GV->getType()->getElementType();
if (DL && GTy->isSized())
BaseSize = DL->getTypeAllocSize(GTy);
if (GTy->isSized())
BaseSize = DL.getTypeAllocSize(GTy);
BaseAlign = GV->getAlignment();
if (DL && BaseAlign == 0 && GTy->isSized())
BaseAlign = DL->getABITypeAlignment(GTy);
if (BaseAlign == 0 && GTy->isSized())
BaseAlign = DL.getABITypeAlignment(GTy);
}
}

@@ -456,8 +460,8 @@ void Lint::visitMemoryReference(Instruction &I,

// Accesses that say that the memory is more aligned than it is are not
// defined.
if (DL && Align == 0 && Ty && Ty->isSized())
Align = DL->getABITypeAlignment(Ty);
if (Align == 0 && Ty && Ty->isSized())
Align = DL.getABITypeAlignment(Ty);
Assert(!BaseAlign || Align <= MinAlign(BaseAlign, Offset),
"Undefined behavior: Memory reference address is misaligned", &I);
}
@@ -487,22 +491,23 @@ void Lint::visitSub(BinaryOperator &I) {
}

void Lint::visitLShr(BinaryOperator &I) {
if (ConstantInt *CI =
dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
if (ConstantInt *CI = dyn_cast<ConstantInt>(
findValue(I.getOperand(1), I.getModule()->getDataLayout(),
/*OffsetOk=*/false)))
Assert(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
"Undefined result: Shift count out of range", &I);
}

void Lint::visitAShr(BinaryOperator &I) {
if (ConstantInt *CI =
dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(
I.getOperand(1), I.getModule()->getDataLayout(), /*OffsetOk=*/false)))
Assert(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
"Undefined result: Shift count out of range", &I);
}

void Lint::visitShl(BinaryOperator &I) {
if (ConstantInt *CI =
dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(
I.getOperand(1), I.getModule()->getDataLayout(), /*OffsetOk=*/false)))
Assert(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
"Undefined result: Shift count out of range", &I);
}
@@ -688,7 +693,7 @@ void Lint::visitEHEndCatch(IntrinsicInst *II) {
II);
}

static bool isZero(Value *V, const DataLayout *DL, DominatorTree *DT,
static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
AssumptionCache *AC) {
// Assume undef could be zero.
if (isa<UndefValue>(V))
@@ -729,22 +734,22 @@ static bool isZero(Value *V, const DataLayout *DL, DominatorTree *DT,
}

void Lint::visitSDiv(BinaryOperator &I) {
Assert(!isZero(I.getOperand(1), DL, DT, AC),
Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
"Undefined behavior: Division by zero", &I);
}

void Lint::visitUDiv(BinaryOperator &I) {
Assert(!isZero(I.getOperand(1), DL, DT, AC),
Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
"Undefined behavior: Division by zero", &I);
}

void Lint::visitSRem(BinaryOperator &I) {
Assert(!isZero(I.getOperand(1), DL, DT, AC),
Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
"Undefined behavior: Division by zero", &I);
}

void Lint::visitURem(BinaryOperator &I) {
Assert(!isZero(I.getOperand(1), DL, DT, AC),
Assert(!isZero(I.getOperand(1), I.getModule()->getDataLayout(), DT, AC),
"Undefined behavior: Division by zero", &I);
}

@@ -771,17 +776,17 @@ void Lint::visitIndirectBrInst(IndirectBrInst &I) {
}

void Lint::visitExtractElementInst(ExtractElementInst &I) {
if (ConstantInt *CI =
dyn_cast<ConstantInt>(findValue(I.getIndexOperand(),
/*OffsetOk=*/false)))
if (ConstantInt *CI = dyn_cast<ConstantInt>(
findValue(I.getIndexOperand(), I.getModule()->getDataLayout(),
/*OffsetOk=*/false)))
Assert(CI->getValue().ult(I.getVectorOperandType()->getNumElements()),
"Undefined result: extractelement index out of range", &I);
}

void Lint::visitInsertElementInst(InsertElementInst &I) {
if (ConstantInt *CI =
dyn_cast<ConstantInt>(findValue(I.getOperand(2),
/*OffsetOk=*/false)))
if (ConstantInt *CI = dyn_cast<ConstantInt>(
findValue(I.getOperand(2), I.getModule()->getDataLayout(),
/*OffsetOk=*/false)))
Assert(CI->getValue().ult(I.getType()->getNumElements()),
"Undefined result: insertelement index out of range", &I);
}
@@ -802,13 +807,13 @@ void Lint::visitUnreachableInst(UnreachableInst &I) {
/// Most analysis passes don't require this logic, because instcombine
/// will simplify most of these kinds of things away. But it's a goal of
/// this Lint pass to be useful even on non-optimized IR.
Value *Lint::findValue(Value *V, bool OffsetOk) const {
Value *Lint::findValue(Value *V, const DataLayout &DL, bool OffsetOk) const {
SmallPtrSet<Value *, 4> Visited;
return findValueImpl(V, OffsetOk, Visited);
return findValueImpl(V, DL, OffsetOk, Visited);
}

/// findValueImpl - Implementation helper for findValue.
Value *Lint::findValueImpl(Value *V, bool OffsetOk,
Value *Lint::findValueImpl(Value *V, const DataLayout &DL, bool OffsetOk,
SmallPtrSetImpl<Value *> &Visited) const {
// Detect self-referential values.
if (!Visited.insert(V).second)
@@ -829,7 +834,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
break;
if (Value *U = FindAvailableLoadedValue(L->getPointerOperand(),
BB, BBI, 6, AA))
return findValueImpl(U, OffsetOk, Visited);
return findValueImpl(U, DL, OffsetOk, Visited);
if (BBI != BB->begin()) break;
BB = BB->getUniquePredecessor();
if (!BB) break;
@@ -838,40 +843,38 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
} else if (PHINode *PN = dyn_cast<PHINode>(V)) {
if (Value *W = PN->hasConstantValue())
if (W != V)
return findValueImpl(W, OffsetOk, Visited);
return findValueImpl(W, DL, OffsetOk, Visited);
} else if (CastInst *CI = dyn_cast<CastInst>(V)) {
if (CI->isNoopCast(DL))
return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
return findValueImpl(CI->getOperand(0), DL, OffsetOk, Visited);
} else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
if (Value *W = FindInsertedValue(Ex->getAggregateOperand(),
Ex->getIndices()))
if (W != V)
return findValueImpl(W, OffsetOk, Visited);
return findValueImpl(W, DL, OffsetOk, Visited);
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
// Same as above, but for ConstantExpr instead of Instruction.
if (Instruction::isCast(CE->getOpcode())) {
if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
CE->getOperand(0)->getType(),
CE->getType(),
DL ? DL->getIntPtrType(V->getType()) :
Type::getInt64Ty(V->getContext())))
return findValueImpl(CE->getOperand(0), OffsetOk, Visited);
CE->getOperand(0)->getType(), CE->getType(),
DL.getIntPtrType(V->getType())))
return findValueImpl(CE->getOperand(0), DL, OffsetOk, Visited);
} else if (CE->getOpcode() == Instruction::ExtractValue) {
ArrayRef<unsigned> Indices = CE->getIndices();
if (Value *W = FindInsertedValue(CE->getOperand(0), Indices))
if (W != V)
return findValueImpl(W, OffsetOk, Visited);
return findValueImpl(W, DL, OffsetOk, Visited);
}
}

// As a last resort, try SimplifyInstruction or constant folding.
if (Instruction *Inst = dyn_cast<Instruction>(V)) {
if (Value *W = SimplifyInstruction(Inst, DL, TLI, DT, AC))
return findValueImpl(W, OffsetOk, Visited);
return findValueImpl(W, DL, OffsetOk, Visited);
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
if (Value *W = ConstantFoldConstantExpression(CE, DL, TLI))
if (W != V)
return findValueImpl(W, OffsetOk, Visited);
return findValueImpl(W, DL, OffsetOk, Visited);
}

return V;
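
With the `DL` member removed from Lint, the layout is now threaded through `findValue`/`findValueImpl` explicitly and fetched from the instruction being checked. A hedged sketch of the same convention in a standalone helper; `storeSizeInBytes` is a made-up name used only for illustration:

```cpp
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"

using namespace llvm;

// Fetch the layout from the instruction's enclosing module and use it
// directly; the old "DL ? DL->getTypeStoreSize(Ty) : 0" fallback disappears.
static uint64_t storeSizeInBytes(Instruction &I, Type *Ty) {
  const DataLayout &DL = I.getModule()->getDataLayout();
  return Ty->isSized() ? DL.getTypeStoreSize(Ty) : 0;
}
```
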
26 changes: 11 additions & 15 deletions llvm/lib/Analysis/Loads.cpp
@@ -63,7 +63,8 @@ static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
unsigned Align, const DataLayout *DL) {
unsigned Align) {
const DataLayout &DL = ScanFrom->getModule()->getDataLayout();
int64_t ByteOffset = 0;
Value *Base = V;
Base = GetPointerBaseWithConstantOffset(V, ByteOffset, DL);
@@ -88,19 +89,19 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
}

PointerType *AddrTy = cast<PointerType>(V->getType());
uint64_t LoadSize = DL ? DL->getTypeStoreSize(AddrTy->getElementType()) : 0;
uint64_t LoadSize = DL.getTypeStoreSize(AddrTy->getElementType());

// If we found a base allocated type from either an alloca or global variable,
// try to see if we are definitively within the allocated region. We need to
// know the size of the base type and the loaded type to do anything in this
// case, so only try this when we have the DataLayout available.
if (BaseType && BaseType->isSized() && DL) {
// case.
if (BaseType && BaseType->isSized()) {
if (BaseAlign == 0)
BaseAlign = DL->getPrefTypeAlignment(BaseType);
BaseAlign = DL.getPrefTypeAlignment(BaseType);

if (Align <= BaseAlign) {
// Check if the load is within the bounds of the underlying object.
if (ByteOffset + LoadSize <= DL->getTypeAllocSize(BaseType) &&
if (ByteOffset + LoadSize <= DL.getTypeAllocSize(BaseType) &&
(Align == 0 || (ByteOffset % Align) == 0))
return true;
}
@@ -134,16 +135,13 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
else
continue;

// Handle trivial cases even w/o DataLayout or other work.
// Handle trivial cases.
if (AccessedPtr == V)
return true;

if (!DL)
continue;

auto *AccessedTy = cast<PointerType>(AccessedPtr->getType());
if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
LoadSize <= DL->getTypeStoreSize(AccessedTy->getElementType()))
LoadSize <= DL.getTypeStoreSize(AccessedTy->getElementType()))
return true;
}
return false;
@@ -177,8 +175,6 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,

Type *AccessTy = cast<PointerType>(Ptr->getType())->getElementType();

// Try to get the DataLayout for this module. This may be null, in which case
// the optimizations will be limited.
const DataLayout &DL = ScanBB->getModule()->getDataLayout();

// Try to get the store size for the type.
@@ -207,7 +203,7 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
if (AreEquivalentAddressValues(
LI->getPointerOperand()->stripPointerCasts(), StrippedPtr) &&
CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, &DL)) {
CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
if (AATags)
LI->getAAMetadata(*AATags);
return LI;
@@ -220,7 +216,7 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
// those cases are unlikely.)
if (AreEquivalentAddressValues(StorePtr, StrippedPtr) &&
CastInst::isBitOrNoopPointerCastable(SI->getValueOperand()->getType(),
AccessTy, &DL)) {
AccessTy, DL)) {
if (AATags)
SI->getAAMetadata(*AATags);
return SI->getOperand(0);
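
Callers of `isSafeToLoadUnconditionally` drop the trailing DataLayout argument entirely, since the function now pulls the layout from `ScanFrom`'s module. A small usage sketch under that assumption (the wrapper name is illustrative):

```cpp
#include "llvm/Analysis/Loads.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

static bool canSpeculateLoad(LoadInst *LI) {
  // The load itself serves as the scan point; its own alignment is reused.
  return isSafeToLoadUnconditionally(LI->getPointerOperand(), LI,
                                     LI->getAlignment());
}
```
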
51 changes: 26 additions & 25 deletions llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -168,8 +168,8 @@ class AccessAnalysis {
/// \brief Set of potential dependent memory accesses.
typedef EquivalenceClasses<MemAccessInfo> DepCandidates;

AccessAnalysis(const DataLayout *Dl, AliasAnalysis *AA, DepCandidates &DA) :
DL(Dl), AST(*AA), DepCands(DA), IsRTCheckNeeded(false) {}
AccessAnalysis(const DataLayout &Dl, AliasAnalysis *AA, DepCandidates &DA)
: DL(Dl), AST(*AA), DepCands(DA), IsRTCheckNeeded(false) {}

/// \brief Register a load and whether it is only read from.
void addLoad(AliasAnalysis::Location &Loc, bool IsReadOnly) {
@@ -217,14 +217,14 @@ class AccessAnalysis {
/// Set of all accesses.
PtrAccessSet Accesses;

const DataLayout &DL;

/// Set of accesses that need a further dependence check.
MemAccessInfoSet CheckDeps;

/// Set of pointers that are read only.
SmallPtrSet<Value*, 16> ReadOnlyPtr;

const DataLayout *DL;

/// An alias set tracker to partition the access set by underlying object and
//intrinsic property (such as TBAA metadata).
AliasSetTracker AST;
@@ -252,8 +252,8 @@ static bool hasComputableBounds(ScalarEvolution *SE,

/// \brief Check the stride of the pointer and ensure that it does not wrap in
/// the address space.
static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
const Loop *Lp, const ValueToValueMap &StridesMap);
static int isStridedPtr(ScalarEvolution *SE, Value *Ptr, const Loop *Lp,
const ValueToValueMap &StridesMap);

bool AccessAnalysis::canCheckPtrAtRT(
LoopAccessInfo::RuntimePointerCheck &RtCheck, unsigned &NumComparisons,
@@ -289,10 +289,10 @@ bool AccessAnalysis::canCheckPtrAtRT(
++NumReadPtrChecks;

if (hasComputableBounds(SE, StridesMap, Ptr) &&
// When we run after a failing dependency check we have to make sure we
// don't have wrapping pointers.
// When we run after a failing dependency check we have to make sure
// we don't have wrapping pointers.
(!ShouldCheckStride ||
isStridedPtr(SE, DL, Ptr, TheLoop, StridesMap) == 1)) {
isStridedPtr(SE, Ptr, TheLoop, StridesMap) == 1)) {
// The id of the dependence set.
unsigned DepId;

@@ -498,8 +498,8 @@ class MemoryDepChecker {
typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet;

MemoryDepChecker(ScalarEvolution *Se, const DataLayout *Dl, const Loop *L)
: SE(Se), DL(Dl), InnermostLoop(L), AccessIdx(0),
MemoryDepChecker(ScalarEvolution *Se, const Loop *L)
: SE(Se), InnermostLoop(L), AccessIdx(0),
ShouldRetryWithRuntimeCheck(false) {}

/// \brief Register the location (instructions are given increasing numbers)
@@ -536,7 +536,6 @@ class MemoryDepChecker {

private:
ScalarEvolution *SE;
const DataLayout *DL;
const Loop *InnermostLoop;

/// \brief Maps access locations (ptr, read/write) to program order.
@@ -585,8 +584,8 @@ static bool isInBoundsGep(Value *Ptr) {
}

/// \brief Check whether the access through \p Ptr has a constant stride.
static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
const Loop *Lp, const ValueToValueMap &StridesMap) {
static int isStridedPtr(ScalarEvolution *SE, Value *Ptr, const Loop *Lp,
const ValueToValueMap &StridesMap) {
const Type *Ty = Ptr->getType();
assert(Ty->isPointerTy() && "Unexpected non-ptr");

@@ -640,7 +639,8 @@ static int isStridedPtr(ScalarEvolution *SE, const DataLayout *DL, Value *Ptr,
return 0;
}

int64_t Size = DL->getTypeAllocSize(PtrTy->getElementType());
auto &DL = Lp->getHeader()->getModule()->getDataLayout();
int64_t Size = DL.getTypeAllocSize(PtrTy->getElementType());
const APInt &APStepVal = C->getValue()->getValue();

// Huge step value - give up.
@@ -726,8 +726,8 @@ bool MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
const SCEV *AScev = replaceSymbolicStrideSCEV(SE, Strides, APtr);
const SCEV *BScev = replaceSymbolicStrideSCEV(SE, Strides, BPtr);

int StrideAPtr = isStridedPtr(SE, DL, APtr, InnermostLoop, Strides);
int StrideBPtr = isStridedPtr(SE, DL, BPtr, InnermostLoop, Strides);
int StrideAPtr = isStridedPtr(SE, APtr, InnermostLoop, Strides);
int StrideBPtr = isStridedPtr(SE, BPtr, InnermostLoop, Strides);

const SCEV *Src = AScev;
const SCEV *Sink = BScev;
@@ -768,7 +768,8 @@ bool MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,

Type *ATy = APtr->getType()->getPointerElementType();
Type *BTy = BPtr->getType()->getPointerElementType();
unsigned TypeByteSize = DL->getTypeAllocSize(ATy);
auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
unsigned TypeByteSize = DL.getTypeAllocSize(ATy);

// Negative distances are not plausible dependencies.
const APInt &Val = C->getValue()->getValue();
@@ -939,7 +940,7 @@ void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
PtrRtCheck.Need = false;

const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
MemoryDepChecker DepChecker(SE, DL, TheLoop);
MemoryDepChecker DepChecker(SE, TheLoop);

// For each block.
for (Loop::block_iterator bb = TheLoop->block_begin(),
@@ -1009,7 +1010,8 @@ void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
}

AccessAnalysis::DepCandidates DependentAccesses;
AccessAnalysis Accesses(DL, AA, DependentAccesses);
AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
AA, DependentAccesses);

// Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
// multiple times on the same object. If the ptr is accessed twice, once
@@ -1068,8 +1070,7 @@ void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
// read a few words, modify, and write a few words, and some of the
// words may be written to the same address.
bool IsReadOnlyPtr = false;
if (Seen.insert(Ptr).second ||
!isStridedPtr(SE, DL, Ptr, TheLoop, Strides)) {
if (Seen.insert(Ptr).second || !isStridedPtr(SE, Ptr, TheLoop, Strides)) {
++NumReads;
IsReadOnlyPtr = true;
}
@@ -1223,7 +1224,7 @@ LoopAccessInfo::addRuntimeCheck(Instruction *Loc) const {
SmallVector<TrackingVH<Value> , 2> Ends;

LLVMContext &Ctx = Loc->getContext();
SCEVExpander Exp(*SE, "induction");
SCEVExpander Exp(*SE, DL, "induction");
Instruction *FirstInst = nullptr;

for (unsigned i = 0; i < NumPointers; ++i) {
@@ -1298,7 +1299,7 @@ LoopAccessInfo::addRuntimeCheck(Instruction *Loc) const {
}

LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
const DataLayout *DL,
const DataLayout &DL,
const TargetLibraryInfo *TLI, AliasAnalysis *AA,
DominatorTree *DT,
const ValueToValueMap &Strides)
@@ -1336,6 +1337,7 @@ LoopAccessAnalysis::getInfo(Loop *L, const ValueToValueMap &Strides) {
#endif

if (!LAI) {
const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();
LAI = llvm::make_unique<LoopAccessInfo>(L, SE, DL, TLI, AA, DT, Strides);
#ifndef NDEBUG
LAI->NumSymbolicStrides = Strides.size();
@@ -1360,7 +1362,6 @@ void LoopAccessAnalysis::print(raw_ostream &OS, const Module *M) const {

bool LoopAccessAnalysis::runOnFunction(Function &F) {
SE = &getAnalysis<ScalarEvolution>();
DL = &F.getParent()->getDataLayout();
auto *TLIP = getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
TLI = TLIP ? &TLIP->getTLI() : nullptr;
AA = &getAnalysis<AliasAnalysis>();
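
Several helpers in this file (`isStridedPtr`, the dependence checker, `analyzeLoop`) now recover the layout through the loop being analyzed rather than receiving a pointer from the pass. A sketch of that chain; `layoutForLoop` is an illustrative name only:

```cpp
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static const DataLayout &layoutForLoop(const Loop *Lp) {
  // Loop -> header block -> parent module -> DataLayout, always available.
  return Lp->getHeader()->getModule()->getDataLayout();
}
```
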
2 changes: 1 addition & 1 deletion llvm/lib/Analysis/MemDerefPrinter.cpp
@@ -53,7 +53,7 @@ bool MemDerefPrinter::runOnFunction(Function &F) {
for (auto &I: inst_range(F)) {
if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
Value *PO = LI->getPointerOperand();
if (PO->isDereferenceablePointer(&DL))
if (PO->isDereferenceablePointer(DL))
Vec.push_back(PO);
}
}
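
The only change here is that `isDereferenceablePointer` now takes the layout by reference rather than by address. A hedged usage sketch with an illustrative helper name:

```cpp
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static bool pointerIsDereferenceable(LoadInst *LI) {
  const DataLayout &DL = LI->getModule()->getDataLayout();
  // The layout is passed as DL, not &DL.
  return LI->getPointerOperand()->isDereferenceablePointer(DL);
}
```
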
46 changes: 21 additions & 25 deletions llvm/lib/Analysis/MemoryBuiltins.cpp
@@ -206,20 +206,20 @@ const CallInst *llvm::extractMallocCall(const Value *I,
return isMallocLikeFn(I, TLI) ? dyn_cast<CallInst>(I) : nullptr;
}

static Value *computeArraySize(const CallInst *CI, const DataLayout *DL,
static Value *computeArraySize(const CallInst *CI, const DataLayout &DL,
const TargetLibraryInfo *TLI,
bool LookThroughSExt = false) {
if (!CI)
return nullptr;

// The size of the malloc's result type must be known to determine array size.
Type *T = getMallocAllocatedType(CI, TLI);
if (!T || !T->isSized() || !DL)
if (!T || !T->isSized())
return nullptr;

unsigned ElementSize = DL->getTypeAllocSize(T);
unsigned ElementSize = DL.getTypeAllocSize(T);
if (StructType *ST = dyn_cast<StructType>(T))
ElementSize = DL->getStructLayout(ST)->getSizeInBytes();
ElementSize = DL.getStructLayout(ST)->getSizeInBytes();

// If malloc call's arg can be determined to be a multiple of ElementSize,
// return the multiple. Otherwise, return NULL.
@@ -280,7 +280,7 @@ Type *llvm::getMallocAllocatedType(const CallInst *CI,
/// then return that multiple. For non-array mallocs, the multiple is
/// constant 1. Otherwise, return NULL for mallocs whose array size cannot be
/// determined.
Value *llvm::getMallocArraySize(CallInst *CI, const DataLayout *DL,
Value *llvm::getMallocArraySize(CallInst *CI, const DataLayout &DL,
const TargetLibraryInfo *TLI,
bool LookThroughSExt) {
assert(isMallocLikeFn(CI, TLI) && "getMallocArraySize and not malloc call");
@@ -350,11 +350,8 @@ const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) {
/// object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the alignment of allocas,
/// byval arguments, and global variables.
bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout *DL,
bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
const TargetLibraryInfo *TLI, bool RoundToAlign) {
if (!DL)
return false;

ObjectSizeOffsetVisitor Visitor(DL, TLI, Ptr->getContext(), RoundToAlign);
SizeOffsetType Data = Visitor.compute(const_cast<Value*>(Ptr));
if (!Visitor.bothKnown(Data))
@@ -382,17 +379,17 @@ APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) {
return Size;
}

ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout *DL,
ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout &DL,
const TargetLibraryInfo *TLI,
LLVMContext &Context,
bool RoundToAlign)
: DL(DL), TLI(TLI), RoundToAlign(RoundToAlign) {
: DL(DL), TLI(TLI), RoundToAlign(RoundToAlign) {
// Pointer size must be rechecked for each object visited since it could have
// a different address space.
}

SizeOffsetType ObjectSizeOffsetVisitor::compute(Value *V) {
IntTyBits = DL->getPointerTypeSizeInBits(V->getType());
IntTyBits = DL.getPointerTypeSizeInBits(V->getType());
Zero = APInt::getNullValue(IntTyBits);

V = V->stripPointerCasts();
@@ -432,7 +429,7 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitAllocaInst(AllocaInst &I) {
if (!I.getAllocatedType()->isSized())
return unknown();

APInt Size(IntTyBits, DL->getTypeAllocSize(I.getAllocatedType()));
APInt Size(IntTyBits, DL.getTypeAllocSize(I.getAllocatedType()));
if (!I.isArrayAllocation())
return std::make_pair(align(Size, I.getAlignment()), Zero);

@@ -451,7 +448,7 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
return unknown();
}
PointerType *PT = cast<PointerType>(A.getType());
APInt Size(IntTyBits, DL->getTypeAllocSize(PT->getElementType()));
APInt Size(IntTyBits, DL.getTypeAllocSize(PT->getElementType()));
return std::make_pair(align(Size, A.getParamAlignment()), Zero);
}

@@ -524,7 +521,7 @@ ObjectSizeOffsetVisitor::visitExtractValueInst(ExtractValueInst&) {
SizeOffsetType ObjectSizeOffsetVisitor::visitGEPOperator(GEPOperator &GEP) {
SizeOffsetType PtrData = compute(GEP.getPointerOperand());
APInt Offset(IntTyBits, 0);
if (!bothKnown(PtrData) || !GEP.accumulateConstantOffset(*DL, Offset))
if (!bothKnown(PtrData) || !GEP.accumulateConstantOffset(DL, Offset))
return unknown();

return std::make_pair(PtrData.first, PtrData.second + Offset);
@@ -540,7 +537,7 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitGlobalVariable(GlobalVariable &GV){
if (!GV.hasDefinitiveInitializer())
return unknown();

APInt Size(IntTyBits, DL->getTypeAllocSize(GV.getType()->getElementType()));
APInt Size(IntTyBits, DL.getTypeAllocSize(GV.getType()->getElementType()));
return std::make_pair(align(Size, GV.getAlignment()), Zero);
}

@@ -576,19 +573,18 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
return unknown();
}

ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const DataLayout *DL,
const TargetLibraryInfo *TLI,
LLVMContext &Context,
bool RoundToAlign)
: DL(DL), TLI(TLI), Context(Context), Builder(Context, TargetFolder(DL)),
RoundToAlign(RoundToAlign) {
ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(
const DataLayout &DL, const TargetLibraryInfo *TLI, LLVMContext &Context,
bool RoundToAlign)
: DL(DL), TLI(TLI), Context(Context), Builder(Context, TargetFolder(DL)),
RoundToAlign(RoundToAlign) {
// IntTy and Zero must be set for each compute() since the address space may
// be different for later objects.
}

SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
// XXX - Are vectors of pointers possible here?
IntTy = cast<IntegerType>(DL->getIntPtrType(V->getType()));
IntTy = cast<IntegerType>(DL.getIntPtrType(V->getType()));
Zero = ConstantInt::get(IntTy, 0);

SizeOffsetEvalType Result = compute_(V);
@@ -670,7 +666,7 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) {
assert(I.isArrayAllocation());
Value *ArraySize = I.getArraySize();
Value *Size = ConstantInt::get(ArraySize->getType(),
DL->getTypeAllocSize(I.getAllocatedType()));
DL.getTypeAllocSize(I.getAllocatedType()));
Size = Builder.CreateMul(Size, ArraySize);
return std::make_pair(Size, Zero);
}
@@ -722,7 +718,7 @@ ObjectSizeOffsetEvaluator::visitGEPOperator(GEPOperator &GEP) {
if (!bothKnown(PtrData))
return unknown();

Value *Offset = EmitGEPOffset(&Builder, *DL, &GEP, /*NoAssumptions=*/true);
Value *Offset = EmitGEPOffset(&Builder, DL, &GEP, /*NoAssumptions=*/true);
Offset = Builder.CreateAdd(PtrData.second, Offset);
return std::make_pair(PtrData.first, Offset);
}
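
`getObjectSize` and the two visitor classes now require a valid layout reference, so the early `if (!DL) return false;` bail-out is gone. A hedged caller sketch; the wrapper name is illustrative:

```cpp
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

using namespace llvm;

static bool knownObjectSize(LoadInst *LI, const TargetLibraryInfo *TLI,
                            uint64_t &Size) {
  const DataLayout &DL = LI->getModule()->getDataLayout();
  // RoundToAlign keeps its default; only the DL argument changes shape.
  return getObjectSize(LI->getPointerOperand(), Size, DL, TLI);
}
```
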
42 changes: 19 additions & 23 deletions llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -93,7 +93,6 @@ void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
bool MemoryDependenceAnalysis::runOnFunction(Function &F) {
AA = &getAnalysis<AliasAnalysis>();
AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
DL = &F.getParent()->getDataLayout();
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : nullptr;
@@ -262,22 +261,17 @@ getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
///
/// MemLocBase, MemLocOffset are lazily computed here the first time the
/// base/offs of memloc is needed.
static bool
isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
const Value *&MemLocBase,
int64_t &MemLocOffs,
const LoadInst *LI,
const DataLayout *DL) {
// If we have no target data, we can't do this.
if (!DL) return false;
static bool isLoadLoadClobberIfExtendedToFullWidth(
const AliasAnalysis::Location &MemLoc, const Value *&MemLocBase,
int64_t &MemLocOffs, const LoadInst *LI) {
const DataLayout &DL = LI->getModule()->getDataLayout();

// If we haven't already computed the base/offset of MemLoc, do so now.
if (!MemLocBase)
MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, DL);

unsigned Size = MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(MemLocBase, MemLocOffs, MemLoc.Size,
LI, *DL);
unsigned Size = MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize(
MemLocBase, MemLocOffs, MemLoc.Size, LI);
return Size != 0;
}

@@ -288,10 +282,9 @@ isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
/// 2) safe for the target, and 3) would provide the specified memory
/// location value, then this function returns the size in bytes of the
/// load width to use. If not, this returns zero.
unsigned MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
unsigned MemLocSize, const LoadInst *LI,
const DataLayout &DL) {
unsigned MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize(
const Value *MemLocBase, int64_t MemLocOffs, unsigned MemLocSize,
const LoadInst *LI) {
// We can only extend simple integer loads.
if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;

@@ -300,10 +293,12 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
if (LI->getParent()->getParent()->hasFnAttribute(Attribute::SanitizeThread))
return 0;

const DataLayout &DL = LI->getModule()->getDataLayout();

// Get the base of this load.
int64_t LIOffs = 0;
const Value *LIBase =
GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, &DL);
GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, DL);

// If the two pointers are not based on the same pointer, we can't tell that
// they are related.
@@ -420,6 +415,8 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
isInvariantLoad = true;
}

const DataLayout &DL = BB->getModule()->getDataLayout();

// Walk backwards through the basic block, looking for dependencies.
while (ScanIt != BB->begin()) {
Instruction *Inst = --ScanIt;
@@ -504,12 +501,12 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
// location is 1 byte at P+1). If so, return it as a load/load
// clobber result, allowing the client to decide to widen the load if
// it wants to.
if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType()))
if (LI->getAlignment()*8 > ITy->getPrimitiveSizeInBits() &&
if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType())) {
if (LI->getAlignment() * 8 > ITy->getPrimitiveSizeInBits() &&
isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
MemLocOffset, LI, DL))
MemLocOffset, LI))
return MemDepResult::getClobber(Inst);

}
continue;
}

@@ -922,8 +919,7 @@ getNonLocalPointerDependency(Instruction *QueryInst,
const_cast<Value *>(Loc.Ptr)));
return;
}


const DataLayout &DL = FromBB->getModule()->getDataLayout();
PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, AC);

// This is the set of blocks we've inspected, and the pointer we consider in
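
Likewise, `getLoadLoadClobberFullWidthSize` loses its DataLayout parameter because the `LoadInst` already carries its module. A sketch of the updated call, with all surrounding names purely illustrative:

```cpp
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Non-zero means LI could legally be widened so that it also covers the
// location (Base + Offs, Size bytes).
static bool widenedLoadCovers(const Value *Base, int64_t Offs, unsigned Size,
                              const LoadInst *LI) {
  return MemoryDependenceAnalysis::getLoadLoadClobberFullWidthSize(
             Base, Offs, Size, LI) != 0;
}
```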