InferAddressSpaces: Move target intrinsic handling to TTI
I'm planning on handling intrinsics that will benefit from checking
the address space enums. Don't bother moving the address collection
for now, since those won't need the enums.

llvm-svn: 368895
arsenm committed Aug 14, 2019
1 parent 0eac2a2 commit dbc1f20
Showing 7 changed files with 118 additions and 23 deletions.
28 changes: 28 additions & 0 deletions llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -368,6 +368,20 @@ class TargetTransformInfo {
/// optimize away.
unsigned getFlatAddressSpace() const;

/// Return any intrinsic address operand indexes which may be rewritten if
/// they use a flat address space pointer.
///
/// \returns true if the intrinsic was handled.
bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
Intrinsic::ID IID) const;

/// Rewrite intrinsic call \p II such that \p OldV will be replaced with \p
/// NewV, which has a different address space. This should happen for every
/// operand index that collectFlatAddressOperands returned for the intrinsic.
/// \returns true if the intrinsic was handled.
bool rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
Value *OldV, Value *NewV) const;

/// Test whether calls to a function lower to actual program function
/// calls.
///
@@ -1160,6 +1174,10 @@ class TargetTransformInfo::Concept {
virtual bool isSourceOfDivergence(const Value *V) = 0;
virtual bool isAlwaysUniform(const Value *V) = 0;
virtual unsigned getFlatAddressSpace() = 0;
virtual bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
Intrinsic::ID IID) const = 0;
virtual bool rewriteIntrinsicWithAddressSpace(
IntrinsicInst *II, Value *OldV, Value *NewV) const = 0;
virtual bool isLoweredToCall(const Function *F) = 0;
virtual void getUnrollingPreferences(Loop *L, ScalarEvolution &,
UnrollingPreferences &UP) = 0;
@@ -1400,6 +1418,16 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
return Impl.getFlatAddressSpace();
}

bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
Intrinsic::ID IID) const override {
return Impl.collectFlatAddressOperands(OpIndexes, IID);
}

bool rewriteIntrinsicWithAddressSpace(
IntrinsicInst *II, Value *OldV, Value *NewV) const override {
return Impl.rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
}

bool isLoweredToCall(const Function *F) override {
return Impl.isLoweredToCall(F);
}
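
Taken together, the two hooks give a target-independent pass a simple protocol: ask the target which operands of an intrinsic are rewritable flat pointers, infer better address spaces for them, then hand the call back to the target to rewrite. A minimal caller-side sketch, assuming the surrounding LLVM types and a hypothetical helper getInferredAddrSpacePtr standing in for the pass's own inference (the real call sites appear in InferAddressSpaces.cpp below):

// Sketch only: walk the operand indexes the target reported and let the
// target rewrite the call for each pointer that could be improved.
SmallVector<int, 2> OpIndexes;
if (TTI.collectFlatAddressOperands(OpIndexes, II->getIntrinsicID())) {
  for (int Idx : OpIndexes) {
    Value *OldV = II->getArgOperand(Idx);
    if (Value *NewV = getInferredAddrSpacePtr(OldV)) // hypothetical helper
      TTI.rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
  }
}
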
10 changes: 10 additions & 0 deletions llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -156,6 +156,16 @@ class TargetTransformInfoImplBase {
return -1;
}

bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
Intrinsic::ID IID) const {
return false;
}

bool rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
Value *OldV, Value *NewV) const {
return false;
}

bool isLoweredToCall(const Function *F) {
assert(F && "A concrete function must be provided to this routine.");

10 changes: 10 additions & 0 deletions llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -215,6 +215,16 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
return -1;
}

bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
Intrinsic::ID IID) const {
return false;
}

bool rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
Value *OldV, Value *NewV) const {
return false;
}

bool isLegalAddImmediate(int64_t imm) {
return getTLI()->isLegalAddImmediate(imm);
}
10 changes: 10 additions & 0 deletions llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -227,6 +227,16 @@ unsigned TargetTransformInfo::getFlatAddressSpace() const {
return TTIImpl->getFlatAddressSpace();
}

bool TargetTransformInfo::collectFlatAddressOperands(
SmallVectorImpl<int> &OpIndexes, Intrinsic::ID IID) const {
return TTIImpl->collectFlatAddressOperands(OpIndexes, IID);
}

bool TargetTransformInfo::rewriteIntrinsicWithAddressSpace(
IntrinsicInst *II, Value *OldV, Value *NewV) const {
return TTIImpl->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
}

bool TargetTransformInfo::isLoweredToCall(const Function *F) const {
return TTIImpl->isLoweredToCall(F);
}
40 changes: 40 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -590,6 +590,46 @@ bool GCNTTIImpl::isAlwaysUniform(const Value *V) const {
return false;
}

bool GCNTTIImpl::collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
Intrinsic::ID IID) const {
switch (IID) {
case Intrinsic::amdgcn_atomic_inc:
case Intrinsic::amdgcn_atomic_dec:
case Intrinsic::amdgcn_ds_fadd:
case Intrinsic::amdgcn_ds_fmin:
case Intrinsic::amdgcn_ds_fmax:
OpIndexes.push_back(0);
return true;
default:
return false;
}
}

bool GCNTTIImpl::rewriteIntrinsicWithAddressSpace(
IntrinsicInst *II, Value *OldV, Value *NewV) const {
switch (II->getIntrinsicID()) {
case Intrinsic::amdgcn_atomic_inc:
case Intrinsic::amdgcn_atomic_dec:
case Intrinsic::amdgcn_ds_fadd:
case Intrinsic::amdgcn_ds_fmin:
case Intrinsic::amdgcn_ds_fmax: {
const ConstantInt *IsVolatile = cast<ConstantInt>(II->getArgOperand(4));
if (!IsVolatile->isZero())
return false;
Module *M = II->getParent()->getParent()->getParent();
Type *DestTy = II->getType();
Type *SrcTy = NewV->getType();
Function *NewDecl =
Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy});
II->setArgOperand(0, NewV);
II->setCalledFunction(NewDecl);
return true;
}
default:
return false;
}
}

unsigned GCNTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) {
if (ST->hasVOP3PInsts()) {
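
Because the handling now sits behind TTI, another target can opt in without touching the generic pass. A minimal sketch of what such an override might look like; MyTTIImpl and Intrinsic::mytgt_atomic_op are invented for illustration, and the rewrite simply mirrors the GCN code above:

bool MyTTIImpl::collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
                                           Intrinsic::ID IID) const {
  if (IID != Intrinsic::mytgt_atomic_op) // hypothetical intrinsic
    return false;
  OpIndexes.push_back(0); // the pointer operand eligible for rewriting
  return true;
}

bool MyTTIImpl::rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
                                                 Value *OldV,
                                                 Value *NewV) const {
  if (II->getIntrinsicID() != Intrinsic::mytgt_atomic_op)
    return false;
  // Re-mangle the overloaded declaration for the new pointer type, then
  // splice in the rewritten operand.
  Function *NewDecl = Intrinsic::getDeclaration(
      II->getModule(), II->getIntrinsicID(), {II->getType(), NewV->getType()});
  II->setArgOperand(0, NewV);
  II->setCalledFunction(NewDecl);
  return true;
}

Note that the GCN implementation refuses the rewrite when the intrinsic's volatile flag (operand 4) is set, the same conservative bail-out the pass previously performed inline.
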
5 changes: 5 additions & 0 deletions llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h
@@ -183,6 +183,11 @@ class GCNTTIImpl final : public BasicTTIImplBase<GCNTTIImpl> {
return AMDGPUAS::FLAT_ADDRESS;
}

bool collectFlatAddressOperands(SmallVectorImpl<int> &OpIndexes,
Intrinsic::ID IID) const;
bool rewriteIntrinsicWithAddressSpace(IntrinsicInst *II,
Value *OldV, Value *NewV) const;

unsigned getVectorSplitCost() { return 0; }

unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
38 changes: 15 additions & 23 deletions llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -141,6 +141,8 @@ using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>;

/// InferAddressSpaces
class InferAddressSpaces : public FunctionPass {
+ const TargetTransformInfo *TTI;

/// Target specific address space which uses of should be replaced if
/// possible.
unsigned FlatAddrSpace;
@@ -264,17 +266,6 @@ bool InferAddressSpaces::rewriteIntrinsicOperands(IntrinsicInst *II,
Module *M = II->getParent()->getParent()->getParent();

switch (II->getIntrinsicID()) {
- case Intrinsic::amdgcn_atomic_inc:
- case Intrinsic::amdgcn_atomic_dec:
- case Intrinsic::amdgcn_ds_fadd:
- case Intrinsic::amdgcn_ds_fmin:
- case Intrinsic::amdgcn_ds_fmax: {
- const ConstantInt *IsVolatile = cast<ConstantInt>(II->getArgOperand(4));
- if (!IsVolatile->isZero())
- return false;
-
- LLVM_FALLTHROUGH;
- }
case Intrinsic::objectsize: {
Type *DestTy = II->getType();
Type *SrcTy = NewV->getType();
@@ -285,25 +276,27 @@
return true;
}
default:
- return false;
+ return TTI->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
}
}

- // TODO: Move logic to TTI?
void InferAddressSpaces::collectRewritableIntrinsicOperands(
IntrinsicInst *II, std::vector<std::pair<Value *, bool>> &PostorderStack,
DenseSet<Value *> &Visited) const {
- switch (II->getIntrinsicID()) {
+ auto IID = II->getIntrinsicID();
+ switch (IID) {
case Intrinsic::objectsize:
- case Intrinsic::amdgcn_atomic_inc:
- case Intrinsic::amdgcn_atomic_dec:
- case Intrinsic::amdgcn_ds_fadd:
- case Intrinsic::amdgcn_ds_fmin:
- case Intrinsic::amdgcn_ds_fmax:
appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(0),
PostorderStack, Visited);
break;
default:
+ SmallVector<int, 2> OpIndexes;
+ if (TTI->collectFlatAddressOperands(OpIndexes, IID)) {
+ for (int Idx : OpIndexes) {
+ appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(Idx),
+ PostorderStack, Visited);
+ }
+ }
break;
}
}
@@ -631,11 +624,10 @@ bool InferAddressSpaces::runOnFunction(Function &F) {
if (skipFunction(F))
return false;

- const TargetTransformInfo &TTI =
- getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+ TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);

if (FlatAddrSpace == UninitializedAddressSpace) {
- FlatAddrSpace = TTI.getFlatAddressSpace();
+ FlatAddrSpace = TTI->getFlatAddressSpace();
if (FlatAddrSpace == UninitializedAddressSpace)
return false;
}
@@ -650,7 +642,7 @@ bool InferAddressSpaces::runOnFunction(Function &F) {

// Changes the address spaces of the flat address expressions who are inferred
// to point to a specific address space.
- return rewriteWithNewAddressSpaces(TTI, Postorder, InferredAddrSpace, &F);
+ return rewriteWithNewAddressSpaces(*TTI, Postorder, InferredAddrSpace, &F);
}

// Constants need to be tracked through RAUW to handle cases with nested
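
One detail worth calling out: both the remaining objectsize case and the GCN hook rebuild the called declaration instead of only swapping the pointer operand, because these intrinsics are overloaded on the pointer type. A condensed sketch of that remangling step, with descriptive names (same behavior as the code above; getModule() is equivalent to the getParent() chain the GCN code uses):

// The overload is mangled on {result type, pointer type}, so a pointer in a
// new address space needs a freshly mangled declaration before the call can
// be retargeted.
Module *M = II->getModule();
Function *NewDecl = Intrinsic::getDeclaration(
    M, II->getIntrinsicID(), {II->getType(), NewV->getType()});
II->setArgOperand(0, NewV);     // use the pointer in the inferred space
II->setCalledFunction(NewDecl); // call the re-mangled overload
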
