[TTI] Use OperandValueInfo in getMemoryOpCost client api [nfc]
This removes the last use of OperandValueKind from the client-side API and (once this is fully plumbed through the TTI implementation) allows store costing to use the same operand properties as arithmetic costing.
preames committed Aug 22, 2022
1 parent 71771f8 commit 27d3321
Show file tree
Hide file tree
Showing 4 changed files with 18 additions and 15 deletions.
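
For orientation, a minimal sketch of the before/after call shape. The struct layout here is inferred from the brace initializers in the hunks below, not copied verbatim from the LLVM headers:

    // Assumed shape of the aggregate now used across the client-side API
    // (see llvm/include/llvm/Analysis/TargetTransformInfo.h for the real one).
    struct OperandValueInfo {
      OperandValueKind Kind = OK_AnyValue;          // constant? uniform?
      OperandValueProperties Properties = OP_None;  // e.g. power of two
    };

    // Before: only the kind crossed the API boundary.
    TTI.getMemoryOpCost(Opcode, SrcTy, Alignment, AS, CostKind,
                        TTI::OK_AnyValue, I);
    // After: kind and properties travel together, typically via brace init.
    TTI.getMemoryOpCost(Opcode, SrcTy, Alignment, AS, CostKind,
                        {TTI::OK_AnyValue, TTI::OP_None}, I);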
llvm/include/llvm/Analysis/TargetTransformInfo.h (1 addition, 1 deletion)
@@ -1182,7 +1182,7 @@ class TargetTransformInfo {
getMemoryOpCost(unsigned Opcode, Type *Src, Align Alignment,
unsigned AddressSpace,
TTI::TargetCostKind CostKind = TTI::TCK_RecipThroughput,
- OperandValueKind OpdInfo = OK_AnyValue,
+ OperandValueInfo OpdInfo = {OK_AnyValue, OP_None},
const Instruction *I = nullptr) const;

/// \return The cost of VP Load and Store instructions.
llvm/lib/Analysis/TargetTransformInfo.cpp (2 additions, 2 deletions)
@@ -904,12 +904,12 @@ InstructionCost TargetTransformInfo::getReplicationShuffleCost(

InstructionCost TargetTransformInfo::getMemoryOpCost(
unsigned Opcode, Type *Src, Align Alignment, unsigned AddressSpace,
- TTI::TargetCostKind CostKind, TTI::OperandValueKind OpdInfo,
+ TTI::TargetCostKind CostKind, TTI::OperandValueInfo OpdInfo,
const Instruction *I) const {
assert((I == nullptr || I->getOpcode() == Opcode) &&
"Opcode should reflect passed instruction.");
InstructionCost Cost = TTIImpl->getMemoryOpCost(
- Opcode, Src, Alignment, AddressSpace, CostKind, OpdInfo, I);
+ Opcode, Src, Alignment, AddressSpace, CostKind, OpdInfo.Kind, I);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}
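Note that the wrapper above still forwards only OpdInfo.Kind into the TTIImpl layer: the Properties half is dropped at this boundary until the implementation side is plumbed through, which is what keeps the change NFC.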
llvm/lib/Transforms/Vectorize/LoopVectorize.cpp (4 additions, 4 deletions)
@@ -6398,9 +6398,9 @@ LoopVectorizationCostModel::getConsecutiveMemOpCost(Instruction *I,
Cost += TTI.getMaskedMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
CostKind);
} else {
- TTI::OperandValueKind OpVK = TTI::getOperandInfo(I->getOperand(0)).Kind;
+ TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
Cost += TTI.getMemoryOpCost(I->getOpcode(), VectorTy, Alignment, AS,
- CostKind, OpVK, I);
+ CostKind, OpInfo, I);
}

bool Reverse = ConsecutiveStride < 0;
@@ -6679,10 +6679,10 @@ LoopVectorizationCostModel::getMemoryInstructionCost(Instruction *I,
const Align Alignment = getLoadStoreAlignment(I);
unsigned AS = getLoadStoreAddressSpace(I);

- TTI::OperandValueKind OpVK = TTI::getOperandInfo(I->getOperand(0)).Kind;
+ TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(I->getOperand(0));
return TTI.getAddressComputationCost(ValTy) +
TTI.getMemoryOpCost(I->getOpcode(), ValTy, Alignment, AS,
- TTI::TCK_RecipThroughput, OpVK, I);
+ TTI::TCK_RecipThroughput, OpInfo, I);
}
return getWideningCost(I, VF);
}
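Both LoopVectorize call sites follow the same pattern: classify the value operand once with TTI::getOperandInfo and hand the whole result to the cost query. A sketch of that pattern applied to a store, with illustrative variable names:

    // Illustrative only; mirrors the hunks above.
    if (auto *SI = dyn_cast<StoreInst>(I)) {
      // Classify the stored value (constant? uniform? power of two?).
      TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(SI->getValueOperand());
      InstructionCost Cost = TTI.getMemoryOpCost(
          Instruction::Store, SI->getValueOperand()->getType(), SI->getAlign(),
          SI->getPointerAddressSpace(), TTI::TCK_RecipThroughput, OpInfo, SI);
    }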
llvm/lib/Transforms/Vectorize/SLPVectorizer.cpp (11 additions, 8 deletions)
@@ -6048,15 +6048,17 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
auto *LI = cast<LoadInst>(V);
ScalarsCost += TTI->getMemoryOpCost(
Instruction::Load, LI->getType(), LI->getAlign(),
- LI->getPointerAddressSpace(), CostKind, TTI::OK_AnyValue, LI);
+ LI->getPointerAddressSpace(), CostKind,
+ {TTI::OK_AnyValue, TTI::OP_None}, LI);
}
auto *LI = cast<LoadInst>(E->getMainOp());
auto *LoadTy = FixedVectorType::get(LI->getType(), VF);
Align Alignment = LI->getAlign();
GatherCost += VectorizedCnt *
TTI->getMemoryOpCost(Instruction::Load, LoadTy, Alignment,
LI->getPointerAddressSpace(),
- CostKind, TTI::OK_AnyValue, LI);
+ CostKind, {TTI::OK_AnyValue,
+ TTI::OP_None}, LI);
GatherCost += ScatterVectorizeCnt *
TTI->getGatherScatterOpCost(
Instruction::Load, LoadTy, LI->getPointerOperand(),
@@ -6462,15 +6464,15 @@ InstructionCost BoUpSLP::getEntryCost(const TreeEntry *E,
Align Alignment = cast<LoadInst>(VL0)->getAlign();
InstructionCost ScalarEltCost =
TTI->getMemoryOpCost(Instruction::Load, ScalarTy, Alignment, 0,
- CostKind, TTI::OK_AnyValue, VL0);
+ CostKind, {TTI::OK_AnyValue, TTI::OP_None}, VL0);
if (NeedToShuffleReuses) {
CommonCost -= (EntryVF - VL.size()) * ScalarEltCost;
}
InstructionCost ScalarLdCost = VecTy->getNumElements() * ScalarEltCost;
InstructionCost VecLdCost;
if (E->State == TreeEntry::Vectorize) {
VecLdCost = TTI->getMemoryOpCost(Instruction::Load, VecTy, Alignment, 0,
- CostKind, TTI::OK_AnyValue, VL0);
+ CostKind, {TTI::OK_AnyValue, TTI::OP_None}, VL0);
} else {
assert(E->State == TreeEntry::ScatterVectorize && "Unknown EntryState");
Align CommonAlignment = Alignment;
@@ -6490,11 +6492,11 @@
auto *SI =
cast<StoreInst>(IsReorder ? VL[E->ReorderIndices.front()] : VL0);
Align Alignment = SI->getAlign();
- TTI::OperandValueKind OpVK = TTI::getOperandInfo(SI->getOperand(0)).Kind;
+ TTI::OperandValueInfo OpInfo = TTI::getOperandInfo(SI->getOperand(0));
InstructionCost ScalarEltCost = TTI->getMemoryOpCost(
- Instruction::Store, ScalarTy, Alignment, 0, CostKind, OpVK, VL0);
+ Instruction::Store, ScalarTy, Alignment, 0, CostKind, OpInfo, VL0);
InstructionCost ScalarStCost = VecTy->getNumElements() * ScalarEltCost;
- OpVK = TTI::OK_AnyValue;
+ TTI::OperandValueKind OpVK = TTI::OK_AnyValue;
if (all_of(E->Scalars,
[](Value *V) {
return isConstant(cast<Instruction>(V)->getOperand(0));
@@ -6505,7 +6507,8 @@
}))
OpVK = TTI::OK_NonUniformConstantValue;
InstructionCost VecStCost = TTI->getMemoryOpCost(
- Instruction::Store, VecTy, Alignment, 0, CostKind, OpVK, VL0);
+ Instruction::Store, VecTy, Alignment, 0, CostKind,
+ {OpVK, TTI::OP_None}, VL0);
LLVM_DEBUG(dumpTreeCosts(E, CommonCost, VecStCost, ScalarStCost));
return CommonCost + VecStCost - ScalarStCost;
}
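This store case is where the new plumbing should eventually pay off: the scalar query reuses getOperandInfo on the stored value, while the vector query upgrades OpVK to OK_NonUniformConstantValue when every lane stores a constant (the lines between the two @@ headers above are elided in this view). Once targets consume the full OperandValueInfo, the same argument slot can carry operand properties such as OP_PowerOf2 for stores, matching arithmetic costing.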
