Skip to content

Commit

Permalink
[Alignment][NFC] MachineMemOperand::getAlign/getBaseAlign
Browse files Browse the repository at this point in the history
Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790

Reviewers: courbet

Subscribers: arsenm, dschuff, sdardis, nemanjai, jvesely, nhaehnle, sbc100, jgravelle-google, hiraditya, aheejin, kbarton, jrtc27, atanasyan, jfb, kerbowa, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D76925
  • Loading branch information
gchatelet committed Mar 27, 2020
1 parent a515fd0 commit 74eac90
Show file tree
Hide file tree
Showing 25 changed files with 116 additions and 102 deletions.
Expand Up @@ -429,7 +429,7 @@ bool InstructionSelector::executeMatchTable(
dbgs() << CurrentIdx << ": GIM_CheckMemoryAlignment"
<< "(MIs[" << InsnID << "]->memoperands() + " << MMOIdx
<< ")->getAlignment() >= " << MinAlign << ")\n");
if (MMO->getAlignment() < MinAlign && handleReject() == RejectAndGiveUp)
if (MMO->getAlign() < MinAlign && handleReject() == RejectAndGiveUp)
return false;

break;
Expand Down
20 changes: 17 additions & 3 deletions llvm/include/llvm/CodeGen/MachineMemOperand.h
Expand Up @@ -225,11 +225,25 @@ class MachineMemOperand {

/// Return the minimum known alignment in bytes of the actual memory
/// reference.
uint64_t getAlignment() const;
/// FIXME: Remove once transition to Align is over.
LLVM_ATTRIBUTE_DEPRECATED(uint64_t getAlignment() const,
"Use getAlign instead");

/// Return the minimum known alignment in bytes of the actual memory
/// reference.
Align getAlign() const;

/// Return the minimum known alignment in bytes of the base address, without
/// the offset.
/// FIXME: Remove once transition to Align is over.
LLVM_ATTRIBUTE_DEPRECATED(uint64_t getBaseAlignment() const,
"Use getBaseAlign instead") {
return BaseAlign.value();
}

/// Return the minimum known alignment in bytes of the base address, without
/// the offset.
uint64_t getBaseAlignment() const { return BaseAlign.value(); }
Align getBaseAlign() const { return BaseAlign; }

/// Return the AA tags for the memory reference.
AAMDNodes getAAInfo() const { return AAInfo; }
Expand Down Expand Up @@ -307,7 +321,7 @@ class MachineMemOperand {
LHS.getFlags() == RHS.getFlags() &&
LHS.getAAInfo() == RHS.getAAInfo() &&
LHS.getRanges() == RHS.getRanges() &&
LHS.getAlignment() == RHS.getAlignment() &&
LHS.getAlign() == RHS.getAlign() &&
LHS.getAddrSpace() == RHS.getAddrSpace();
}

Expand Down
8 changes: 2 additions & 6 deletions llvm/include/llvm/CodeGen/SelectionDAGNodes.h
Expand Up @@ -1292,12 +1292,8 @@ class MemSDNode : public SDNode {
bool writeMem() const { return MMO->isStore(); }

/// Returns alignment and volatility of the memory access
unsigned getOriginalAlignment() const {
return MMO->getBaseAlignment();
}
unsigned getAlignment() const {
return MMO->getAlignment();
}
unsigned getOriginalAlignment() const { return MMO->getBaseAlign().value(); }
unsigned getAlignment() const { return MMO->getAlign().value(); }

/// Return the SubclassData value, without HasDebugValue. This contains an
/// encoding of the volatile flag, as well as bits used by subclasses. This
Expand Down
4 changes: 2 additions & 2 deletions llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
Expand Up @@ -1268,7 +1268,7 @@ bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
if (IsVolatile)
return false;

Align DstAlign(MemOp->getBaseAlignment());
Align DstAlign = MemOp->getBaseAlign();
Align SrcAlign;
Register Dst = MI.getOperand(1).getReg();
Register Src = MI.getOperand(2).getReg();
Expand All @@ -1277,7 +1277,7 @@ bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
if (ID != Intrinsic::memset) {
assert(MMOIt != MI.memoperands_end() && "Expected a second MMO on MI");
MemOp = *(++MMOIt);
SrcAlign = Align(MemOp->getBaseAlignment());
SrcAlign = MemOp->getBaseAlign();
}

// See if this is a constant length copy
Expand Down
3 changes: 1 addition & 2 deletions llvm/lib/CodeGen/GlobalISel/LegalizerInfo.cpp
Expand Up @@ -506,8 +506,7 @@ LegalizerInfo::getAction(const MachineInstr &MI,
SmallVector<LegalityQuery::MemDesc, 2> MemDescrs;
for (const auto &MMO : MI.memoperands())
MemDescrs.push_back({8 * MMO->getSize() /* in bits */,
8 * MMO->getAlignment(),
MMO->getOrdering()});
8 * MMO->getAlign().value(), MMO->getOrdering()});

return getAction({MI.getOpcode(), Types, MemDescrs});
}
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/CodeGen/MIRVRegNamerUtils.cpp
Expand Up @@ -112,7 +112,7 @@ std::string VRegRenamer::getInstructionOpcodeHash(MachineInstr &MI) {
MIOperands.push_back((unsigned)Op->getOrdering());
MIOperands.push_back((unsigned)Op->getAddrSpace());
MIOperands.push_back((unsigned)Op->getSyncScopeID());
MIOperands.push_back((unsigned)Op->getBaseAlignment());
MIOperands.push_back((unsigned)Op->getBaseAlign().value());
MIOperands.push_back((unsigned)Op->getFailureOrdering());
}

Expand Down
25 changes: 12 additions & 13 deletions llvm/lib/CodeGen/MachineFunction.cpp
Expand Up @@ -486,14 +486,14 @@ MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,

// If there is no pointer value, the offset isn't tracked so we need to adjust
// the base alignment.
unsigned Align = PtrInfo.V.isNull()
? MinAlign(MMO->getBaseAlignment(), Offset)
: MMO->getBaseAlignment();
Align Alignment = PtrInfo.V.isNull()
? commonAlignment(MMO->getBaseAlign(), Offset)
: MMO->getBaseAlign();

return new (Allocator)
MachineMemOperand(PtrInfo.getWithOffset(Offset), MMO->getFlags(), Size,
Align, AAMDNodes(), nullptr, MMO->getSyncScopeID(),
MMO->getOrdering(), MMO->getFailureOrdering());
return new (Allocator) MachineMemOperand(
PtrInfo.getWithOffset(Offset), MMO->getFlags(), Size, Alignment.value(),
AAMDNodes(), nullptr, MMO->getSyncScopeID(), MMO->getOrdering(),
MMO->getFailureOrdering());
}

MachineMemOperand *
Expand All @@ -503,18 +503,17 @@ MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
MachinePointerInfo(MMO->getValue(), MMO->getOffset()) :
MachinePointerInfo(MMO->getPseudoValue(), MMO->getOffset());

return new (Allocator)
MachineMemOperand(MPI, MMO->getFlags(), MMO->getSize(),
MMO->getBaseAlignment(), AAInfo,
MMO->getRanges(), MMO->getSyncScopeID(),
MMO->getOrdering(), MMO->getFailureOrdering());
return new (Allocator) MachineMemOperand(
MPI, MMO->getFlags(), MMO->getSize(), MMO->getBaseAlign().value(), AAInfo,
MMO->getRanges(), MMO->getSyncScopeID(), MMO->getOrdering(),
MMO->getFailureOrdering());
}

MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
MachineMemOperand::Flags Flags) {
return new (Allocator) MachineMemOperand(
MMO->getPointerInfo(), Flags, MMO->getSize(), MMO->getBaseAlignment(),
MMO->getPointerInfo(), Flags, MMO->getSize(), MMO->getBaseAlign().value(),
MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
MMO->getOrdering(), MMO->getFailureOrdering());
}
Expand Down
19 changes: 11 additions & 8 deletions llvm/lib/CodeGen/MachineOperand.cpp
Expand Up @@ -1014,7 +1014,6 @@ MachineMemOperand::MachineMemOperand(MachinePointerInfo ptrinfo, Flags f,
assert((PtrInfo.V.isNull() || PtrInfo.V.is<const PseudoSourceValue *>() ||
isa<PointerType>(PtrInfo.V.get<const Value *>()->getType())) &&
"invalid pointer value");
assert(getBaseAlignment() == a && a != 0 && "Alignment is not a power of 2!");
assert((isLoad() || isStore()) && "Not a load/store!");

AtomicInfo.SSID = static_cast<unsigned>(SSID);
Expand All @@ -1032,7 +1031,7 @@ void MachineMemOperand::Profile(FoldingSetNodeID &ID) const {
ID.AddInteger(Size);
ID.AddPointer(getOpaqueValue());
ID.AddInteger(getFlags());
ID.AddInteger(getBaseAlignment());
ID.AddInteger(getBaseAlign().value());
}

void MachineMemOperand::refineAlignment(const MachineMemOperand *MMO) {
Expand All @@ -1041,9 +1040,9 @@ void MachineMemOperand::refineAlignment(const MachineMemOperand *MMO) {
assert(MMO->getFlags() == getFlags() && "Flags mismatch!");
assert(MMO->getSize() == getSize() && "Size mismatch!");

if (MMO->getBaseAlignment() >= getBaseAlignment()) {
if (MMO->getBaseAlign() >= getBaseAlign()) {
// Update the alignment value.
BaseAlign = Align(MMO->getBaseAlignment());
BaseAlign = MMO->getBaseAlign();
// Also update the base and offset, because the new alignment may
// not be applicable with the old ones.
PtrInfo = MMO->PtrInfo;
Expand All @@ -1052,8 +1051,12 @@ void MachineMemOperand::refineAlignment(const MachineMemOperand *MMO) {

/// getAlignment - Return the minimum known alignment in bytes of the
/// actual memory reference.
uint64_t MachineMemOperand::getAlignment() const {
return MinAlign(getBaseAlignment(), getOffset());
uint64_t MachineMemOperand::getAlignment() const { return getAlign().value(); }

/// getAlign - Return the minimum known alignment in bytes of the
/// actual memory reference.
Align MachineMemOperand::getAlign() const {
return commonAlignment(getBaseAlign(), getOffset());
}

void MachineMemOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
Expand Down Expand Up @@ -1148,8 +1151,8 @@ void MachineMemOperand::print(raw_ostream &OS, ModuleSlotTracker &MST,
}
}
MachineOperand::printOperandOffset(OS, getOffset());
if (getBaseAlignment() != getSize())
OS << ", align " << getBaseAlignment();
if (getBaseAlign() != getSize())
OS << ", align " << getBaseAlign().value();
auto AAInfo = getAAInfo();
if (AAInfo.TBAA) {
OS << ", !tbaa ";
Expand Down
8 changes: 4 additions & 4 deletions llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
Expand Up @@ -21342,16 +21342,16 @@ bool DAGCombiner::isAlias(SDNode *Op0, SDNode *Op1) const {
// multiples of the size of the data.
int64_t SrcValOffset0 = MUC0.MMO->getOffset();
int64_t SrcValOffset1 = MUC1.MMO->getOffset();
unsigned OrigAlignment0 = MUC0.MMO->getBaseAlignment();
unsigned OrigAlignment1 = MUC1.MMO->getBaseAlignment();
Align OrigAlignment0 = MUC0.MMO->getBaseAlign();
Align OrigAlignment1 = MUC1.MMO->getBaseAlign();
auto &Size0 = MUC0.NumBytes;
auto &Size1 = MUC1.NumBytes;
if (OrigAlignment0 == OrigAlignment1 && SrcValOffset0 != SrcValOffset1 &&
Size0.hasValue() && Size1.hasValue() && *Size0 == *Size1 &&
OrigAlignment0 > *Size0 && SrcValOffset0 % *Size0 == 0 &&
SrcValOffset1 % *Size1 == 0) {
int64_t OffAlign0 = SrcValOffset0 % OrigAlignment0;
int64_t OffAlign1 = SrcValOffset1 % OrigAlignment1;
int64_t OffAlign0 = SrcValOffset0 % OrigAlignment0.value();
int64_t OffAlign1 = SrcValOffset1 % OrigAlignment1.value();

// There is no overlap between these relatively aligned accesses of
// similar size. Return no alias.
Expand Down
4 changes: 2 additions & 2 deletions llvm/lib/CodeGen/TargetLoweringBase.cpp
Expand Up @@ -1557,7 +1557,7 @@ bool TargetLoweringBase::allowsMemoryAccessForAlignment(
LLVMContext &Context, const DataLayout &DL, EVT VT,
const MachineMemOperand &MMO, bool *Fast) const {
return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(),
MMO.getAlignment(), MMO.getFlags(),
MMO.getAlign().value(), MMO.getFlags(),
Fast);
}

Expand All @@ -1573,7 +1573,7 @@ bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
const MachineMemOperand &MMO,
bool *Fast) const {
return allowsMemoryAccess(Context, DL, VT, MMO.getAddrSpace(),
MMO.getAlignment(), MMO.getFlags(), Fast);
MMO.getAlign().value(), MMO.getFlags(), Fast);
}

BranchProbability TargetLoweringBase::getPredictableBranchThreshold() const {
Expand Down
18 changes: 9 additions & 9 deletions llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
Expand Up @@ -444,15 +444,15 @@ static bool isScalarLoadLegal(const MachineInstr &MI) {
AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT;

// There are no extending SMRD/SMEM loads, and they require 4-byte alignment.
return MMO->getSize() >= 4 && MMO->getAlignment() >= 4 &&
// Can't do a scalar atomic load.
!MMO->isAtomic() &&
// Don't use scalar loads for volatile accesses to non-constant address
// spaces.
(IsConst || !MMO->isVolatile()) &&
// Memory must be known constant, or not written before this load.
(IsConst || MMO->isInvariant() || memOpHasNoClobbered(MMO)) &&
AMDGPUInstrInfo::isUniformMMO(MMO);
return MMO->getSize() >= 4 && MMO->getAlign() >= Align(4) &&
// Can't do a scalar atomic load.
!MMO->isAtomic() &&
// Don't use scalar loads for volatile accesses to non-constant address
// spaces.
(IsConst || !MMO->isVolatile()) &&
// Memory must be known constant, or not written before this load.
(IsConst || MMO->isInvariant() || memOpHasNoClobbered(MMO)) &&
AMDGPUInstrInfo::isUniformMMO(MMO);
}

RegisterBankInfo::InstructionMappings
Expand Down
14 changes: 8 additions & 6 deletions llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
Expand Up @@ -3777,7 +3777,7 @@ unsigned ARMBaseInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
// If there are odd number of registers or if it's not 64-bit aligned,
// then it takes an extra AGU (Address Generation Unit) cycle.
if ((NumRegs % 2) || !MI.hasOneMemOperand() ||
(*MI.memoperands_begin())->getAlignment() < 8)
(*MI.memoperands_begin())->getAlign() < Align(8))
++UOps;
return UOps;
}
Expand Down Expand Up @@ -4364,10 +4364,10 @@ int ARMBaseInstrInfo::getOperandLatencyImpl(
return -1;

unsigned DefAlign = DefMI.hasOneMemOperand()
? (*DefMI.memoperands_begin())->getAlignment()
? (*DefMI.memoperands_begin())->getAlign().value()
: 0;
unsigned UseAlign = UseMI.hasOneMemOperand()
? (*UseMI.memoperands_begin())->getAlignment()
? (*UseMI.memoperands_begin())->getAlign().value()
: 0;

// Get the itinerary's latency if possible, and handle variable_ops.
Expand Down Expand Up @@ -4414,10 +4414,12 @@ ARMBaseInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
const MCInstrDesc &UseMCID = get(UseNode->getMachineOpcode());
auto *DefMN = cast<MachineSDNode>(DefNode);
unsigned DefAlign = !DefMN->memoperands_empty()
? (*DefMN->memoperands_begin())->getAlignment() : 0;
? (*DefMN->memoperands_begin())->getAlign().value()
: 0;
auto *UseMN = cast<MachineSDNode>(UseNode);
unsigned UseAlign = !UseMN->memoperands_empty()
? (*UseMN->memoperands_begin())->getAlignment() : 0;
? (*UseMN->memoperands_begin())->getAlign().value()
: 0;
int Latency = getOperandLatency(ItinData, DefMCID, DefIdx, DefAlign,
UseMCID, UseIdx, UseAlign);

Expand Down Expand Up @@ -4708,7 +4710,7 @@ unsigned ARMBaseInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,

// Adjust for dynamic def-side opcode variants not captured by the itinerary.
unsigned DefAlign =
MI.hasOneMemOperand() ? (*MI.memoperands_begin())->getAlignment() : 0;
MI.hasOneMemOperand() ? (*MI.memoperands_begin())->getAlign().value() : 0;
int Adj = adjustDefLatency(Subtarget, MI, MCID, DefAlign);
if (Adj >= 0 || (int)Latency > -Adj) {
return Latency + Adj;
Expand Down
12 changes: 6 additions & 6 deletions llvm/lib/Target/ARM/ARMLoadStoreOptimizer.cpp
Expand Up @@ -1608,7 +1608,7 @@ static bool isMemoryOp(const MachineInstr &MI) {

// Unaligned ldr/str is emulated by some kernels, but unaligned ldm/stm is
// not.
if (MMO.getAlignment() < 4)
if (MMO.getAlign() < Align(4))
return false;

// str <undef> could probably be eliminated entirely, but for now we just want
Expand Down Expand Up @@ -2183,12 +2183,12 @@ ARMPreAllocLoadStoreOpt::CanFormLdStDWord(MachineInstr *Op0, MachineInstr *Op1,
(*Op0->memoperands_begin())->isAtomic())
return false;

unsigned Align = (*Op0->memoperands_begin())->getAlignment();
Align Alignment = (*Op0->memoperands_begin())->getAlign();
const Function &Func = MF->getFunction();
unsigned ReqAlign = STI->hasV6Ops()
? TD->getABITypeAlignment(Type::getInt64Ty(Func.getContext()))
: 8; // Pre-v6 need 8-byte align
if (Align < ReqAlign)
Align ReqAlign =
STI->hasV6Ops() ? TD->getABITypeAlign(Type::getInt64Ty(Func.getContext()))
: Align(8); // Pre-v6 need 8-byte align
if (Alignment < ReqAlign)
return false;

// Then make sure the immediate offset fits.
Expand Down
2 changes: 1 addition & 1 deletion llvm/lib/Target/ARM/Thumb2SizeReduction.cpp
Expand Up @@ -457,7 +457,7 @@ Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
return false;

if (!MI->hasOneMemOperand() ||
(*MI->memoperands_begin())->getAlignment() < 4)
(*MI->memoperands_begin())->getAlign() < Align(4))
return false;

// We're creating a completely different type of load/store - LDM from LDR.
Expand Down
7 changes: 3 additions & 4 deletions llvm/lib/Target/Hexagon/HexagonInstrInfo.cpp
Expand Up @@ -1027,10 +1027,9 @@ bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
auto UseAligned = [&] (const MachineInstr &MI, unsigned NeedAlign) {
if (MI.memoperands().empty())
return false;
return all_of(MI.memoperands(),
[NeedAlign] (const MachineMemOperand *MMO) {
return NeedAlign <= MMO->getAlignment();
});
return all_of(MI.memoperands(), [NeedAlign](const MachineMemOperand *MMO) {
return MMO->getAlign() >= NeedAlign;
});
};

switch (Opc) {
Expand Down
7 changes: 4 additions & 3 deletions llvm/lib/Target/Hexagon/HexagonSplitDouble.cpp
Expand Up @@ -688,11 +688,12 @@ void HexagonSplitDoubleRegs::splitMemRef(MachineInstr *MI,
for (auto &MO : MI->memoperands()) {
const MachinePointerInfo &Ptr = MO->getPointerInfo();
MachineMemOperand::Flags F = MO->getFlags();
int A = MO->getAlignment();
Align A = MO->getAlign();

auto *Tmp1 = MF.getMachineMemOperand(Ptr, F, 4/*size*/, A);
auto *Tmp1 = MF.getMachineMemOperand(Ptr, F, 4 /*size*/, A.value());
LowI->addMemOperand(MF, Tmp1);
auto *Tmp2 = MF.getMachineMemOperand(Ptr, F, 4/*size*/, std::min(A, 4));
auto *Tmp2 = MF.getMachineMemOperand(Ptr, F, 4 /*size*/,
std::min(A, Align(4)).value());
HighI->addMemOperand(MF, Tmp2);
}
}
Expand Down
9 changes: 4 additions & 5 deletions llvm/lib/Target/Hexagon/HexagonStoreWidening.cpp
Expand Up @@ -314,7 +314,7 @@ bool HexagonStoreWidening::selectStores(InstrGroup::iterator Begin,
MachineInstr *FirstMI = *Begin;
assert(!FirstMI->memoperands_empty() && "Expecting some memory operands");
const MachineMemOperand &FirstMMO = getStoreTarget(FirstMI);
unsigned Alignment = FirstMMO.getAlignment();
unsigned Alignment = FirstMMO.getAlign().value();
unsigned SizeAccum = FirstMMO.getSize();
unsigned FirstOffset = getStoreOffset(FirstMI);

Expand Down Expand Up @@ -416,10 +416,9 @@ bool HexagonStoreWidening::createWideStores(InstrGroup &OG, InstrGroup &NG,
MachineInstr *FirstSt = OG.front();
DebugLoc DL = OG.back()->getDebugLoc();
const MachineMemOperand &OldM = getStoreTarget(FirstSt);
MachineMemOperand *NewM =
MF->getMachineMemOperand(OldM.getPointerInfo(), OldM.getFlags(),
TotalSize, OldM.getAlignment(),
OldM.getAAInfo());
MachineMemOperand *NewM = MF->getMachineMemOperand(
OldM.getPointerInfo(), OldM.getFlags(), TotalSize,
OldM.getAlign().value(), OldM.getAAInfo());

if (Acc < 0x10000) {
// Create mem[hw] = #Acc
Expand Down

0 comments on commit 74eac90

Please sign in to comment.