[Alignment][NFC] Deprecate MachineFunction::getMachineMemOperand version that takes an untyped alignment.

Summary:
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790
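
For readers new to the series: llvm::Align can only hold a non-zero power of two, so invalid alignments are rejected at construction instead of surfacing deep inside codegen, and llvm::MaybeAlign models "alignment or unset", replacing the old convention of 0 meaning "unspecified". A minimal sketch of the semantics this patch relies on (simplified; the real types live in llvm/Support/Alignment.h):

    #include "llvm/Support/Alignment.h"
    using namespace llvm;

    void alignSketch() {
      Align A(16);          // OK: non-zero power of two
      // Align Bad(0);      // would assert at construction
      MaybeAlign MA(0);     // 0 maps to "unset", not to an invalid Align
      if (!MA)
        MA = Align(8);      // resolve the unset case explicitly
      Align Resolved = *MA; // dereference only once known to be set
      (void)A; (void)Resolved;
    }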

Reviewers: courbet

Subscribers: hiraditya, jfb, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D77138
gchatelet committed Mar 31, 2020
1 parent c3ca117 commit 998118c
Showing 5 changed files with 89 additions and 99 deletions.
15 changes: 9 additions & 6 deletions llvm/include/llvm/CodeGen/MachineFunction.h
@@ -819,12 +819,15 @@ class MachineFunction {
AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);

/// FIXME: Remove once transition to Align is over.
inline MachineMemOperand *getMachineMemOperand(
MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
unsigned base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr, SyncScope::ID SSID = SyncScope::System,
AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic) {
LLVM_ATTRIBUTE_DEPRECATED(
inline MachineMemOperand *getMachineMemOperand(
MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
unsigned base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
const MDNode *Ranges = nullptr,
SyncScope::ID SSID = SyncScope::System,
AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic),
"Use the version that takes Align instead") {
return getMachineMemOperand(PtrInfo, f, s, Align(base_alignment), AAInfo,
Ranges, SSID, Ordering, FailureOrdering);
}
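The hunk above uses LLVM_ATTRIBUTE_DEPRECATED (from llvm/Support/Compiler.h), which attaches a compiler deprecation message to the declaration: existing callers keep building but now get a warning pointing them at the Align overload. A sketch of the caller-side migration, wrapped in a hypothetical helper so both call shapes are visible:

    #include "llvm/CodeGen/MachineFunction.h"
    using namespace llvm;

    MachineMemOperand *migrate(MachineFunction &MF, MachinePointerInfo PtrInfo,
                               MachineMemOperand::Flags Flags, uint64_t Size) {
      // Before: untyped alignment; still compiles, but now warns as deprecated.
      // return MF.getMachineMemOperand(PtrInfo, Flags, Size,
      //                                /*base_alignment=*/4);

      // After: typed alignment, the overload the deprecation message names.
      return MF.getMachineMemOperand(PtrInfo, Flags, Size, Align(4));
    }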
10 changes: 4 additions & 6 deletions llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -6679,19 +6679,17 @@ SDValue SelectionDAG::getMergeValues(ArrayRef<SDValue> Ops, const SDLoc &dl) {

SDValue SelectionDAG::getMemIntrinsicNode(
unsigned Opcode, const SDLoc &dl, SDVTList VTList, ArrayRef<SDValue> Ops,
EVT MemVT, MachinePointerInfo PtrInfo, unsigned Align,
EVT MemVT, MachinePointerInfo PtrInfo, unsigned Alignment,
MachineMemOperand::Flags Flags, uint64_t Size, const AAMDNodes &AAInfo) {
if (Align == 0) // Ensure that codegen never sees alignment 0
Align = getEVTAlignment(MemVT);

if (!Size && MemVT.isScalableVector())
Size = MemoryLocation::UnknownSize;
else if (!Size)
Size = MemVT.getStoreSize();

MachineFunction &MF = getMachineFunction();
MachineMemOperand *MMO =
MF.getMachineMemOperand(PtrInfo, Flags, Size, Align, AAInfo);
MachineMemOperand *MMO = MF.getMachineMemOperand(
PtrInfo, Flags, Size, Alignment ? Align(Alignment) : getEVTAlign(MemVT),
AAInfo);

return getMemIntrinsicNode(Opcode, dl, VTList, Ops, MemVT, MMO);
}
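Note the shape of that rewrite: the old code patched up Align == 0 after the fact, while the new code picks the alignment in a single expression when building the memory operand. The idiom, reduced to a sketch (DefaultAlign stands in for what getEVTAlign(MemVT) returns):

    #include "llvm/Support/Alignment.h"
    using namespace llvm;

    // 0 historically meant "unspecified"; resolve it to the type's natural
    // alignment rather than ever constructing an invalid Align(0).
    Align resolveAlign(unsigned Requested, Align DefaultAlign) {
      return Requested ? Align(Requested) : DefaultAlign;
    }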
111 changes: 49 additions & 62 deletions llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4173,25 +4173,26 @@ void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
bool IsCompressing) {
SDLoc sdl = getCurSDLoc();

auto getMaskedStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
unsigned& Alignment) {
auto getMaskedStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
MaybeAlign &Alignment) {
// llvm.masked.store.*(Src0, Ptr, alignment, Mask)
Src0 = I.getArgOperand(0);
Ptr = I.getArgOperand(1);
Alignment = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
Alignment =
MaybeAlign(cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
Mask = I.getArgOperand(3);
};
auto getCompressingStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
unsigned& Alignment) {
auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
MaybeAlign &Alignment) {
// llvm.masked.compressstore.*(Src0, Ptr, Mask)
Src0 = I.getArgOperand(0);
Ptr = I.getArgOperand(1);
Mask = I.getArgOperand(2);
Alignment = 0;
Alignment = None;
};

Value *PtrOperand, *MaskOperand, *Src0Operand;
unsigned Alignment;
MaybeAlign Alignment;
if (IsCompressing)
getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
else
@@ -4204,19 +4205,16 @@ void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,

EVT VT = Src0.getValueType();
if (!Alignment)
Alignment = DAG.getEVTAlignment(VT);
Alignment = DAG.getEVTAlign(VT);

AAMDNodes AAInfo;
I.getAAMetadata(AAInfo);

MachineMemOperand *MMO =
DAG.getMachineFunction().
getMachineMemOperand(MachinePointerInfo(PtrOperand),
MachineMemOperand::MOStore,
// TODO: Make MachineMemOperands aware of scalable
// vectors.
VT.getStoreSize().getKnownMinSize(),
Alignment, AAInfo);
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore,
// TODO: Make MachineMemOperands aware of scalable
// vectors.
VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo);
SDValue StoreNode =
DAG.getMaskedStore(getMemoryRoot(), sdl, Src0, Ptr, Offset, Mask, VT, MMO,
ISD::UNINDEXED, false /* Truncating */, IsCompressing);
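
The same pattern recurs in the gather/scatter and load visitors below: the intrinsic's alignment immediate is wrapped in MaybeAlign (so a 0 operand becomes "unset" rather than a magic number), variants without an alignment operand report None, and the visitor resolves the empty case from the value type before creating the MachineMemOperand. A condensed sketch of that flow, with Default standing in for DAG.getEVTAlign(VT):

    #include "llvm/Support/Alignment.h"
    using namespace llvm;

    // ZExtImm stands for the zero-extended alignment immediate of the call.
    MaybeAlign fromOperand(uint64_t ZExtImm) { return MaybeAlign(ZExtImm); }

    Align resolve(MaybeAlign Requested, Align Default) {
      if (!Requested)    // None: missing (or zero) alignment operand
        Requested = Default;
      return *Requested; // matches the *Alignment dereferences in this patch
    }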
@@ -4316,9 +4314,9 @@ void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
SDValue Src0 = getValue(I.getArgOperand(0));
SDValue Mask = getValue(I.getArgOperand(3));
EVT VT = Src0.getValueType();
unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue();
MaybeAlign Alignment(cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
if (!Alignment)
Alignment = DAG.getEVTAlignment(VT);
Alignment = DAG.getEVTAlign(VT);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();

AAMDNodes AAInfo;
@@ -4331,13 +4329,11 @@ void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this);

unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
MachineMemOperand *MMO = DAG.getMachineFunction().
getMachineMemOperand(MachinePointerInfo(AS),
MachineMemOperand::MOStore,
// TODO: Make MachineMemOperands aware of scalable
// vectors.
MemoryLocation::UnknownSize,
Alignment, AAInfo);
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo(AS), MachineMemOperand::MOStore,
// TODO: Make MachineMemOperands aware of scalable
// vectors.
MemoryLocation::UnknownSize, *Alignment, AAInfo);
if (!UniformBase) {
Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
Index = getValue(Ptr);
@@ -4354,25 +4350,26 @@ void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
SDLoc sdl = getCurSDLoc();

auto getMaskedLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
unsigned& Alignment) {
auto getMaskedLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
MaybeAlign &Alignment) {
// @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
Ptr = I.getArgOperand(0);
Alignment = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
Alignment =
MaybeAlign(cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
Mask = I.getArgOperand(2);
Src0 = I.getArgOperand(3);
};
auto getExpandingLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
unsigned& Alignment) {
auto getExpandingLoadOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,
MaybeAlign &Alignment) {
// @llvm.masked.expandload.*(Ptr, Mask, Src0)
Ptr = I.getArgOperand(0);
Alignment = 0;
Alignment = None;
Mask = I.getArgOperand(1);
Src0 = I.getArgOperand(2);
};

Value *PtrOperand, *MaskOperand, *Src0Operand;
unsigned Alignment;
MaybeAlign Alignment;
if (IsExpanding)
getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
else
@@ -4385,7 +4382,7 @@ void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {

EVT VT = Src0.getValueType();
if (!Alignment)
Alignment = DAG.getEVTAlignment(VT);
Alignment = DAG.getEVTAlign(VT);

AAMDNodes AAInfo;
I.getAAMetadata(AAInfo);
@@ -4403,14 +4400,11 @@ void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {

SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();

MachineMemOperand *MMO =
DAG.getMachineFunction().
getMachineMemOperand(MachinePointerInfo(PtrOperand),
MachineMemOperand::MOLoad,
// TODO: Make MachineMemOperands aware of scalable
// vectors.
VT.getStoreSize().getKnownMinSize(),
Alignment, AAInfo, Ranges);
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo(PtrOperand), MachineMemOperand::MOLoad,
// TODO: Make MachineMemOperands aware of scalable
// vectors.
VT.getStoreSize().getKnownMinSize(), *Alignment, AAInfo, Ranges);

SDValue Load =
DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Offset, Mask, Src0, VT, MMO,
@@ -4430,9 +4424,9 @@ void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {

const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
MaybeAlign Alignment(cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
if (!Alignment)
Alignment = DAG.getEVTAlignment(VT);
Alignment = DAG.getEVTAlign(VT);

AAMDNodes AAInfo;
I.getAAMetadata(AAInfo);
@@ -4445,14 +4439,11 @@ void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
SDValue Scale;
bool UniformBase = getUniformBase(Ptr, Base, Index, IndexType, Scale, this);
unsigned AS = Ptr->getType()->getScalarType()->getPointerAddressSpace();
MachineMemOperand *MMO =
DAG.getMachineFunction().
getMachineMemOperand(MachinePointerInfo(AS),
MachineMemOperand::MOLoad,
// TODO: Make MachineMemOperands aware of scalable
// vectors.
MemoryLocation::UnknownSize,
Alignment, AAInfo, Ranges);
MachineMemOperand *MMO = DAG.getMachineFunction().getMachineMemOperand(
MachinePointerInfo(AS), MachineMemOperand::MOLoad,
// TODO: Make MachineMemOperands aware of scalable
// vectors.
MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);

if (!UniformBase) {
Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
@@ -4479,16 +4470,14 @@ void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);

auto Alignment = DAG.getEVTAlignment(MemVT);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());

MachineFunction &MF = DAG.getMachineFunction();
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
Flags, MemVT.getStoreSize(), Alignment,
AAMDNodes(), nullptr, SSID, SuccessOrdering,
FailureOrdering);
MachineMemOperand *MMO = MF.getMachineMemOperand(
MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, SuccessOrdering,
FailureOrdering);

SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
dl, MemVT, VTs, InChain,
@@ -4527,15 +4516,13 @@ void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
SDValue InChain = getRoot();

auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
auto Alignment = DAG.getEVTAlignment(MemVT);
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
auto Flags = TLI.getAtomicMemOperandFlags(I, DAG.getDataLayout());

MachineFunction &MF = DAG.getMachineFunction();
MachineMemOperand *MMO =
MF.getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), Flags,
MemVT.getStoreSize(), Alignment, AAMDNodes(),
nullptr, SSID, Ordering);
MachineMemOperand *MMO = MF.getMachineMemOperand(
MachinePointerInfo(I.getPointerOperand()), Flags, MemVT.getStoreSize(),
DAG.getEVTAlign(MemVT), AAMDNodes(), nullptr, SSID, Ordering);

SDValue L =
DAG.getAtomic(NT, dl, MemVT, InChain,
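A side note on the *Alignment dereferences in this file: MaybeAlign behaves like an Optional, so unary * asserts that a value is present, which is why every path above fills in the unset case first. Where "treat unset as 1" is acceptable, MaybeAlign::valueOrOne() is the non-asserting alternative (a sketch, assuming that helper is available in your tree):

    #include "llvm/Support/Alignment.h"
    using namespace llvm;

    Align pick(MaybeAlign MA) {
      return MA.valueOrOne(); // Align(1) when unset, else the stored value
    }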
36 changes: 19 additions & 17 deletions llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -2176,32 +2176,33 @@ bool AArch64FrameLowering::spillCalleeSavedRegisters(
// Rationale: This sequence saves uop updates compared to a sequence of
// pre-increment spills like stp xi,xj,[sp,#-16]!
// Note: Similar rationale and sequence for restores in epilog.
unsigned Size, Align;
unsigned Size;
Align Alignment;
switch (RPI.Type) {
case RegPairInfo::GPR:
StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
Size = 8;
Align = 8;
Alignment = Align(8);
break;
case RegPairInfo::FPR64:
StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
Size = 8;
Align = 8;
Alignment = Align(8);
break;
case RegPairInfo::FPR128:
StrOpc = RPI.isPaired() ? AArch64::STPQi : AArch64::STRQui;
Size = 16;
Align = 16;
Alignment = Align(16);
break;
case RegPairInfo::ZPR:
StrOpc = AArch64::STR_ZXI;
Size = 16;
Align = 16;
Alignment = Align(16);
break;
case RegPairInfo::PPR:
StrOpc = AArch64::STR_PXI;
Size = 2;
Align = 2;
Alignment = Align(2);
break;
}
LLVM_DEBUG(dbgs() << "CSR spill: (" << printReg(Reg1, TRI);
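
The switch from a local unsigned Align to Align Alignment is not cosmetic: once llvm::Align is in scope, a variable named Align shadows the type, so Align(16) in that scope would name the variable and fail to parse as a construction. A small illustration of the conflict:

    #include "llvm/Support/Alignment.h"
    using namespace llvm;

    void shadowed() {
      unsigned Align = 8; // shadows the llvm::Align type in this scope...
      // Align A(16);     // ...so this line would no longer compile here
      (void)Align;
    }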
@@ -2230,16 +2231,16 @@ bool AArch64FrameLowering::spillCalleeSavedRegisters(
MIB.addReg(Reg2, getPrologueDeath(MF, Reg2));
MIB.addMemOperand(MF.getMachineMemOperand(
MachinePointerInfo::getFixedStack(MF, FrameIdxReg2),
MachineMemOperand::MOStore, Size, Align));
MachineMemOperand::MOStore, Size, Alignment));
}
MIB.addReg(Reg1, getPrologueDeath(MF, Reg1))
.addReg(AArch64::SP)
.addImm(RPI.Offset) // [sp, #offset*scale],
// where factor*scale is implicit
.setMIFlag(MachineInstr::FrameSetup);
MIB.addMemOperand(MF.getMachineMemOperand(
MachinePointerInfo::getFixedStack(MF,FrameIdxReg1),
MachineMemOperand::MOStore, Size, Align));
MachinePointerInfo::getFixedStack(MF, FrameIdxReg1),
MachineMemOperand::MOStore, Size, Alignment));
if (NeedsWinCFI)
InsertSEH(MIB, TII, MachineInstr::FrameSetup);

@@ -2281,32 +2282,33 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
// ldp x22, x21, [sp, #0] // addImm(+0)
// Note: see comment in spillCalleeSavedRegisters()
unsigned LdrOpc;
unsigned Size, Align;
unsigned Size;
Align Alignment;
switch (RPI.Type) {
case RegPairInfo::GPR:
LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
Size = 8;
Align = 8;
Alignment = Align(8);
break;
case RegPairInfo::FPR64:
LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
Size = 8;
Align = 8;
Alignment = Align(8);
break;
case RegPairInfo::FPR128:
LdrOpc = RPI.isPaired() ? AArch64::LDPQi : AArch64::LDRQui;
Size = 16;
Align = 16;
Alignment = Align(16);
break;
case RegPairInfo::ZPR:
LdrOpc = AArch64::LDR_ZXI;
Size = 16;
Align = 16;
Alignment = Align(16);
break;
case RegPairInfo::PPR:
LdrOpc = AArch64::LDR_PXI;
Size = 2;
Align = 2;
Alignment = Align(2);
break;
}
LLVM_DEBUG(dbgs() << "CSR restore: (" << printReg(Reg1, TRI);
@@ -2329,7 +2331,7 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
MIB.addReg(Reg2, getDefRegState(true));
MIB.addMemOperand(MF.getMachineMemOperand(
MachinePointerInfo::getFixedStack(MF, FrameIdxReg2),
MachineMemOperand::MOLoad, Size, Align));
MachineMemOperand::MOLoad, Size, Alignment));
}
MIB.addReg(Reg1, getDefRegState(true))
.addReg(AArch64::SP)
Expand All @@ -2338,7 +2340,7 @@ bool AArch64FrameLowering::restoreCalleeSavedRegisters(
.setMIFlag(MachineInstr::FrameDestroy);
MIB.addMemOperand(MF.getMachineMemOperand(
MachinePointerInfo::getFixedStack(MF, FrameIdxReg1),
MachineMemOperand::MOLoad, Size, Align));
MachineMemOperand::MOLoad, Size, Alignment));
if (NeedsWinCFI)
InsertSEH(MIB, TII, MachineInstr::FrameDestroy);
};