4 changes: 2 additions & 2 deletions llvm/lib/CodeGen/SelectionDAG/SelectionDAGAddressAnalysis.cpp
@@ -85,9 +85,9 @@ bool BaseIndexOffset::equalBaseIndex(const BaseIndexOffset &Other,
}

bool BaseIndexOffset::computeAliasing(const SDNode *Op0,
const Optional<int64_t> NumBytes0,
const std::optional<int64_t> NumBytes0,
const SDNode *Op1,
const Optional<int64_t> NumBytes1,
const std::optional<int64_t> NumBytes1,
const SelectionDAG &DAG, bool &IsAlias) {

BaseIndexOffset BasePtr0 = match(Op0, DAG);
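Every hunk in this patch follows the same mechanical substitution: llvm::Optional<T> becomes std::optional<T> and llvm::None becomes std::nullopt, with no behavioral change intended. A minimal sketch of the pattern, using only the C++17 standard library (decodeOffset is illustrative, not LLVM API):

    #include <optional>

    // Before: llvm::Optional<int64_t>, returning llvm::None on failure.
    // After: the standard type, with identical call-site semantics.
    std::optional<int64_t> decodeOffset(bool IsValid, int64_t Raw) {
      if (!IsValid)
        return std::nullopt; // was: return llvm::None;
      return Raw;            // implicit wrap into std::optional<int64_t>
    }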
12 changes: 6 additions & 6 deletions llvm/lib/CodeGen/SelectionDAG/StatepointLowering.cpp
@@ -159,9 +159,9 @@ StatepointLoweringState::allocateStackSlot(EVT ValueType,
/// Utility function for reservePreviousStackSlotForValue. Tries to find
/// stack slot index to which we have spilled value for previous statepoints.
/// LookUpDepth specifies maximum DFS depth this function is allowed to look.
static Optional<int> findPreviousSpillSlot(const Value *Val,
SelectionDAGBuilder &Builder,
int LookUpDepth) {
static std::optional<int> findPreviousSpillSlot(const Value *Val,
SelectionDAGBuilder &Builder,
int LookUpDepth) {
// Can not look any further - give up now
if (LookUpDepth <= 0)
return std::nullopt;
@@ -196,10 +196,10 @@ static Optional<int> findPreviousSpillSlot(const Value *Val,
// All incoming values should have same known stack slot, otherwise result
// is unknown.
if (const PHINode *Phi = dyn_cast<PHINode>(Val)) {
Optional<int> MergedResult;
std::optional<int> MergedResult;

for (const auto &IncomingValue : Phi->incoming_values()) {
Optional<int> SpillSlot =
std::optional<int> SpillSlot =
findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth - 1);
if (!SpillSlot)
return std::nullopt;
@@ -283,7 +283,7 @@ static void reservePreviousStackSlotForValue(const Value *IncomingValue,
return;

const int LookUpDepth = 6;
Optional<int> Index =
std::optional<int> Index =
findPreviousSpillSlot(IncomingValue, Builder, LookUpDepth);
if (!Index)
return;
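The PHI case in findPreviousSpillSlot reduces to merging optionals: every incoming value must resolve to the same slot, and any unknown or conflicting input makes the whole result unknown. A self-contained sketch of that merge logic (mergeSpillSlots is a hypothetical stand-in, not the function above):

    #include <optional>
    #include <vector>

    std::optional<int>
    mergeSpillSlots(const std::vector<std::optional<int>> &Incoming) {
      std::optional<int> Merged;
      for (const std::optional<int> &Slot : Incoming) {
        if (!Slot)
          return std::nullopt; // unknown input: give up
        if (Merged && *Merged != *Slot)
          return std::nullopt; // conflicting slots: result is unknown
        Merged = Slot;
      }
      return Merged;
    }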
4 changes: 2 additions & 2 deletions llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.cpp
@@ -17,7 +17,7 @@

using namespace llvm;

Optional<RegOrConstant>
std::optional<RegOrConstant>
AArch64GISelUtils::getAArch64VectorSplat(const MachineInstr &MI,
const MachineRegisterInfo &MRI) {
if (auto Splat = getVectorSplat(MI, MRI))
@@ -31,7 +31,7 @@ AArch64GISelUtils::getAArch64VectorSplat(const MachineInstr &MI,
return RegOrConstant(Src);
}

Optional<int64_t>
std::optional<int64_t>
AArch64GISelUtils::getAArch64VectorSplatScalar(const MachineInstr &MI,
const MachineRegisterInfo &MRI) {
auto Splat = getAArch64VectorSplat(MI, MRI);
9 changes: 5 additions & 4 deletions llvm/lib/Target/AArch64/GISel/AArch64GlobalISelUtils.h
@@ -31,13 +31,14 @@ constexpr bool isLegalArithImmed(const uint64_t C) {

/// \returns A value when \p MI is a vector splat of a Register or constant.
/// Checks for generic opcodes and AArch64-specific generic opcodes.
Optional<RegOrConstant> getAArch64VectorSplat(const MachineInstr &MI,
const MachineRegisterInfo &MRI);
std::optional<RegOrConstant>
getAArch64VectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI);

/// \returns A value when \p MI is a constant vector splat.
/// Checks for generic opcodes and AArch64-specific generic opcodes.
Optional<int64_t> getAArch64VectorSplatScalar(const MachineInstr &MI,
const MachineRegisterInfo &MRI);
std::optional<int64_t>
getAArch64VectorSplatScalar(const MachineInstr &MI,
const MachineRegisterInfo &MRI);

/// \returns true if \p MaybeSub and \p Pred are part of a CMN tree for an
/// integer compare.
44 changes: 22 additions & 22 deletions llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -150,7 +150,7 @@ class AArch64InstructionSelector : public InstructionSelector {
/// The lane inserted into is defined by \p LaneIdx. The vector source
/// register is given by \p SrcReg. The register containing the element is
/// given by \p EltReg.
MachineInstr *emitLaneInsert(Optional<Register> DstReg, Register SrcReg,
MachineInstr *emitLaneInsert(std::optional<Register> DstReg, Register SrcReg,
Register EltReg, unsigned LaneIdx,
const RegisterBank &RB,
MachineIRBuilder &MIRBuilder) const;
@@ -205,7 +205,7 @@ class AArch64InstructionSelector : public InstructionSelector {
MachineIRBuilder &MIRBuilder) const;

// Emit a vector concat operation.
MachineInstr *emitVectorConcat(Optional<Register> Dst, Register Op1,
MachineInstr *emitVectorConcat(std::optional<Register> Dst, Register Op1,
Register Op2,
MachineIRBuilder &MIRBuilder) const;

@@ -218,7 +218,7 @@
/// \p Pred if given is the intended predicate to use.
MachineInstr *
emitFPCompare(Register LHS, Register RHS, MachineIRBuilder &MIRBuilder,
Optional<CmpInst::Predicate> = std::nullopt) const;
std::optional<CmpInst::Predicate> = std::nullopt) const;

MachineInstr *
emitInstr(unsigned Opcode, std::initializer_list<llvm::DstOp> DstOps,
@@ -276,7 +276,7 @@ class AArch64InstructionSelector : public InstructionSelector {
MachineInstr *emitSelect(Register Dst, Register LHS, Register RHS,
AArch64CC::CondCode CC,
MachineIRBuilder &MIRBuilder) const;
MachineInstr *emitExtractVectorElt(Optional<Register> DstReg,
MachineInstr *emitExtractVectorElt(std::optional<Register> DstReg,
const RegisterBank &DstRB, LLT ScalarTy,
Register VecReg, unsigned LaneIdx,
MachineIRBuilder &MIRBuilder) const;
@@ -674,7 +674,7 @@ static Register createQTuple(ArrayRef<Register> Regs, MachineIRBuilder &MIB) {
return createTuple(Regs, RegClassIDs, SubRegs, MIB);
}

static Optional<uint64_t> getImmedFromMO(const MachineOperand &Root) {
static std::optional<uint64_t> getImmedFromMO(const MachineOperand &Root) {
auto &MI = *Root.getParent();
auto &MBB = *MI.getParent();
auto &MF = *MBB.getParent();
@@ -1782,17 +1782,18 @@ bool AArch64InstructionSelector::selectCompareBranch(

/// Returns the element immediate value of a vector shift operand if found.
/// This needs to detect a splat-like operation, e.g. a G_BUILD_VECTOR.
static Optional<int64_t> getVectorShiftImm(Register Reg,
MachineRegisterInfo &MRI) {
static std::optional<int64_t> getVectorShiftImm(Register Reg,
MachineRegisterInfo &MRI) {
assert(MRI.getType(Reg).isVector() && "Expected a *vector* shift operand");
MachineInstr *OpMI = MRI.getVRegDef(Reg);
return getAArch64VectorSplatScalar(*OpMI, MRI);
}

/// Matches and returns the shift immediate value for a SHL instruction given
/// a shift operand.
static Optional<int64_t> getVectorSHLImm(LLT SrcTy, Register Reg, MachineRegisterInfo &MRI) {
Optional<int64_t> ShiftImm = getVectorShiftImm(Reg, MRI);
static std::optional<int64_t> getVectorSHLImm(LLT SrcTy, Register Reg,
MachineRegisterInfo &MRI) {
std::optional<int64_t> ShiftImm = getVectorShiftImm(Reg, MRI);
if (!ShiftImm)
return std::nullopt;
// Check the immediate is in range for a SHL.
@@ -1836,7 +1837,7 @@ bool AArch64InstructionSelector::selectVectorSHL(MachineInstr &I,

// Check if we have a vector of constants on RHS that we can select as the
// immediate form.
Optional<int64_t> ImmVal = getVectorSHLImm(Ty, Src2Reg, MRI);
std::optional<int64_t> ImmVal = getVectorSHLImm(Ty, Src2Reg, MRI);

unsigned Opc = 0;
if (Ty == LLT::fixed_vector(2, 64)) {
@@ -3102,7 +3103,7 @@ bool AArch64InstructionSelector::select(MachineInstr &I) {

case TargetOpcode::G_PTRMASK: {
Register MaskReg = I.getOperand(2).getReg();
Optional<int64_t> MaskVal = getIConstantVRegSExtVal(MaskReg, MRI);
std::optional<int64_t> MaskVal = getIConstantVRegSExtVal(MaskReg, MRI);
// TODO: Implement arbitrary cases
if (!MaskVal || !isShiftedMask_64(*MaskVal))
return false;
@@ -4112,7 +4113,7 @@ static bool getLaneCopyOpcode(unsigned &CopyOpc, unsigned &ExtractSubReg,
}

MachineInstr *AArch64InstructionSelector::emitExtractVectorElt(
Optional<Register> DstReg, const RegisterBank &DstRB, LLT ScalarTy,
std::optional<Register> DstReg, const RegisterBank &DstRB, LLT ScalarTy,
Register VecReg, unsigned LaneIdx, MachineIRBuilder &MIRBuilder) const {
MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
unsigned CopyOpc = 0;
@@ -4645,10 +4646,9 @@ MachineInstr *AArch64InstructionSelector::emitCSetForFCmp(
return &*OrMI;
}

MachineInstr *
AArch64InstructionSelector::emitFPCompare(Register LHS, Register RHS,
MachineIRBuilder &MIRBuilder,
Optional<CmpInst::Predicate> Pred) const {
MachineInstr *AArch64InstructionSelector::emitFPCompare(
Register LHS, Register RHS, MachineIRBuilder &MIRBuilder,
std::optional<CmpInst::Predicate> Pred) const {
MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
LLT Ty = MRI.getType(LHS);
if (Ty.isVector())
@@ -4689,7 +4689,7 @@ AArch64InstructionSelector::emitFPCompare(Register LHS, Register RHS,
}

MachineInstr *AArch64InstructionSelector::emitVectorConcat(
Optional<Register> Dst, Register Op1, Register Op2,
std::optional<Register> Dst, Register Op1, Register Op2,
MachineIRBuilder &MIRBuilder) const {
// We implement a vector concat by:
// 1. Use scalar_to_vector to insert the lower vector into the larger dest
@@ -4865,7 +4865,7 @@ MachineInstr *AArch64InstructionSelector::emitConditionalComparison(
LLT OpTy = MRI.getType(LHS);
assert(OpTy.getSizeInBits() == 32 || OpTy.getSizeInBits() == 64);
unsigned CCmpOpc;
Optional<ValueAndVReg> C;
std::optional<ValueAndVReg> C;
if (CmpInst::isIntPredicate(CC)) {
C = getIConstantVRegValWithLookThrough(RHS, MRI);
if (C && C->Value.ult(32))
@@ -5259,7 +5259,7 @@ bool AArch64InstructionSelector::selectShuffleVector(
}

MachineInstr *AArch64InstructionSelector::emitLaneInsert(
Optional<Register> DstReg, Register SrcReg, Register EltReg,
std::optional<Register> DstReg, Register SrcReg, Register EltReg,
unsigned LaneIdx, const RegisterBank &RB,
MachineIRBuilder &MIRBuilder) const {
MachineInstr *InsElt = nullptr;
@@ -6669,7 +6669,7 @@ AArch64_AM::ShiftExtendType AArch64InstructionSelector::getExtendTypeForInst(
if (Opc != TargetOpcode::G_AND)
return AArch64_AM::InvalidShiftExtend;

Optional<uint64_t> MaybeAndMask = getImmedFromMO(MI.getOperand(2));
std::optional<uint64_t> MaybeAndMask = getImmedFromMO(MI.getOperand(2));
if (!MaybeAndMask)
return AArch64_AM::InvalidShiftExtend;
uint64_t AndMask = *MaybeAndMask;
@@ -6724,7 +6724,7 @@ AArch64InstructionSelector::selectArithExtendedRegister(
if (RootDef->getOpcode() == TargetOpcode::G_SHL) {
// Look for a constant on the RHS of the shift.
MachineOperand &RHS = RootDef->getOperand(2);
Optional<uint64_t> MaybeShiftVal = getImmedFromMO(RHS);
std::optional<uint64_t> MaybeShiftVal = getImmedFromMO(RHS);
if (!MaybeShiftVal)
return std::nullopt;
ShiftVal = *MaybeShiftVal;
@@ -6774,7 +6774,7 @@ void AArch64InstructionSelector::renderTruncImm(MachineInstrBuilder &MIB,
const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
"Expected G_CONSTANT");
Optional<int64_t> CstVal =
std::optional<int64_t> CstVal =
getIConstantVRegSExtVal(MI.getOperand(0).getReg(), MRI);
assert(CstVal && "Expected constant value");
MIB.addImm(*CstVal);
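One detail in the emitFPCompare hunk above carries over cleanly: std::nullopt can serve as the default argument for a std::optional parameter, exactly as llvm::None did for llvm::Optional. A standalone illustration (compareOrDefault and Pred are invented for the example, not selector API):

    #include <optional>

    enum class Pred { EQ, LT };

    // Callers may omit the predicate, as emitFPCompare's callers do.
    bool compareOrDefault(int LHS, int RHS,
                          std::optional<Pred> P = std::nullopt) {
      if (!P)
        return LHS == RHS; // no predicate supplied: fall back to equality
      return *P == Pred::EQ ? LHS == RHS : LHS < RHS;
    }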
35 changes: 14 additions & 21 deletions llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp
@@ -360,13 +360,9 @@ bool AMDGPUAsmPrinter::doFinalization(Module &M) {

// Print comments that apply to both callable functions and entry points.
void AMDGPUAsmPrinter::emitCommonFunctionComments(
uint32_t NumVGPR,
Optional<uint32_t> NumAGPR,
uint32_t TotalNumVGPR,
uint32_t NumSGPR,
uint64_t ScratchSize,
uint64_t CodeSize,
const AMDGPUMachineFunction *MFI) {
uint32_t NumVGPR, std::optional<uint32_t> NumAGPR, uint32_t TotalNumVGPR,
uint32_t NumSGPR, uint64_t ScratchSize, uint64_t CodeSize,
const AMDGPUMachineFunction *MFI) {
OutStreamer->emitRawComment(" codeLenInByte = " + Twine(CodeSize), false);
OutStreamer->emitRawComment(" NumSgprs: " + Twine(NumSGPR), false);
OutStreamer->emitRawComment(" NumVgprs: " + Twine(NumVGPR), false);
@@ -523,24 +519,21 @@ bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {
const AMDGPUResourceUsageAnalysis::SIFunctionResourceInfo &Info =
ResourceUsage->getResourceInfo(&MF.getFunction());
emitCommonFunctionComments(
Info.NumVGPR,
STM.hasMAIInsts() ? Info.NumAGPR : Optional<uint32_t>(),
Info.getTotalNumVGPRs(STM),
Info.getTotalNumSGPRs(MF.getSubtarget<GCNSubtarget>()),
Info.PrivateSegmentSize,
getFunctionCodeSize(MF), MFI);
Info.NumVGPR,
STM.hasMAIInsts() ? Info.NumAGPR : std::optional<uint32_t>(),
Info.getTotalNumVGPRs(STM),
Info.getTotalNumSGPRs(MF.getSubtarget<GCNSubtarget>()),
Info.PrivateSegmentSize, getFunctionCodeSize(MF), MFI);
return false;
}

OutStreamer->emitRawComment(" Kernel info:", false);
emitCommonFunctionComments(CurrentProgramInfo.NumArchVGPR,
STM.hasMAIInsts()
? CurrentProgramInfo.NumAccVGPR
: Optional<uint32_t>(),
CurrentProgramInfo.NumVGPR,
CurrentProgramInfo.NumSGPR,
CurrentProgramInfo.ScratchSize,
getFunctionCodeSize(MF), MFI);
emitCommonFunctionComments(
CurrentProgramInfo.NumArchVGPR,
STM.hasMAIInsts() ? CurrentProgramInfo.NumAccVGPR
: std::optional<uint32_t>(),
CurrentProgramInfo.NumVGPR, CurrentProgramInfo.NumSGPR,
CurrentProgramInfo.ScratchSize, getFunctionCodeSize(MF), MFI);

OutStreamer->emitRawComment(
" FloatMode: " + Twine(CurrentProgramInfo.FloatMode), false);
10 changes: 4 additions & 6 deletions llvm/lib/Target/AMDGPU/AMDGPUAsmPrinter.h
@@ -63,12 +63,10 @@ class AMDGPUAsmPrinter final : public AsmPrinter {
const SIProgramInfo &KernelInfo);
void emitPALFunctionMetadata(const MachineFunction &MF);
void emitCommonFunctionComments(uint32_t NumVGPR,
Optional<uint32_t> NumAGPR,
uint32_t TotalNumVGPR,
uint32_t NumSGPR,
uint64_t ScratchSize,
uint64_t CodeSize,
const AMDGPUMachineFunction* MFI);
std::optional<uint32_t> NumAGPR,
uint32_t TotalNumVGPR, uint32_t NumSGPR,
uint64_t ScratchSize, uint64_t CodeSize,
const AMDGPUMachineFunction *MFI);
void emitResourceUsageRemarks(const MachineFunction &MF,
const SIProgramInfo &CurrentProgramInfo,
bool isModuleEntryFunction, bool hasMAIInsts);
2 changes: 1 addition & 1 deletion llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp
@@ -813,7 +813,7 @@ bool AMDGPUCallLowering::passSpecialInputs(MachineIRBuilder &MIRBuilder,
} else if (InputID == AMDGPUFunctionArgInfo::IMPLICIT_ARG_PTR) {
LI->getImplicitArgPtr(InputReg, MRI, MIRBuilder);
} else if (InputID == AMDGPUFunctionArgInfo::LDS_KERNEL_ID) {
Optional<uint32_t> Id =
std::optional<uint32_t> Id =
AMDGPUMachineFunction::getLDSKernelIdMetadata(MF.getFunction());
if (Id.has_value()) {
MIRBuilder.buildConstant(InputReg, Id.value());
2 changes: 1 addition & 1 deletion llvm/lib/Target/AMDGPU/AMDGPUCombinerHelper.cpp
@@ -150,7 +150,7 @@ static bool isInv2Pi(const APFloat &APF) {
// additional cost to negate them.
static bool isConstantCostlierToNegate(MachineInstr &MI, Register Reg,
MachineRegisterInfo &MRI) {
Optional<FPValueAndVReg> FPValReg;
std::optional<FPValueAndVReg> FPValReg;
if (mi_match(Reg, MRI, m_GFCstOrSplat(FPValReg))) {
if (FPValReg->Value.isZero() && !FPValReg->Value.isNegative())
return true;
6 changes: 3 additions & 3 deletions llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
@@ -500,16 +500,16 @@ void MetadataStreamerMsgPackV3::verify(StringRef HSAMetadataString) const {
}
}

Optional<StringRef>
std::optional<StringRef>
MetadataStreamerMsgPackV3::getAccessQualifier(StringRef AccQual) const {
return StringSwitch<Optional<StringRef>>(AccQual)
return StringSwitch<std::optional<StringRef>>(AccQual)
.Case("read_only", StringRef("read_only"))
.Case("write_only", StringRef("write_only"))
.Case("read_write", StringRef("read_write"))
.Default(std::nullopt);
}

Optional<StringRef> MetadataStreamerMsgPackV3::getAddressSpaceQualifier(
std::optional<StringRef> MetadataStreamerMsgPackV3::getAddressSpaceQualifier(
unsigned AddressSpace) const {
switch (AddressSpace) {
case AMDGPUAS::PRIVATE_ADDRESS:
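The getAccessQualifier hunk shows the one place this migration interacts with another ADT: StringSwitch must be instantiated on std::optional<StringRef> so that the Case values and Default(std::nullopt) converge on a single result type. A minimal reproduction against LLVM's headers (classifyAccess is an invented name):

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSwitch.h"
    #include <optional>

    std::optional<llvm::StringRef> classifyAccess(llvm::StringRef AccQual) {
      return llvm::StringSwitch<std::optional<llvm::StringRef>>(AccQual)
          .Case("read_only", llvm::StringRef("read_only"))
          .Case("write_only", llvm::StringRef("write_only"))
          .Default(std::nullopt); // unrecognized qualifier: no value
    }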
5 changes: 3 additions & 2 deletions llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.h
@@ -69,9 +69,10 @@ class MetadataStreamerMsgPackV3 : public MetadataStreamer {

void verify(StringRef HSAMetadataString) const;

Optional<StringRef> getAccessQualifier(StringRef AccQual) const;
std::optional<StringRef> getAccessQualifier(StringRef AccQual) const;

Optional<StringRef> getAddressSpaceQualifier(unsigned AddressSpace) const;
std::optional<StringRef>
getAddressSpaceQualifier(unsigned AddressSpace) const;

StringRef getValueKind(Type *Ty, StringRef TypeQual,
StringRef BaseTypeName) const;
6 changes: 3 additions & 3 deletions llvm/lib/Target/AMDGPU/AMDGPUIGroupLP.cpp
@@ -93,7 +93,7 @@ class SchedGroup {
SchedGroupMask SGMask;

// Maximum number of SUnits that can be added to this group.
Optional<unsigned> MaxSize;
std::optional<unsigned> MaxSize;

// SchedGroups will only synchronize with other SchedGroups that have the same
// SyncID.
@@ -175,13 +175,13 @@ class SchedGroup {

SchedGroupMask getMask() { return SGMask; }

SchedGroup(SchedGroupMask SGMask, Optional<unsigned> MaxSize,
SchedGroup(SchedGroupMask SGMask, std::optional<unsigned> MaxSize,
ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
: SGMask(SGMask), MaxSize(MaxSize), DAG(DAG), TII(TII) {
SGID = NumSchedGroups++;
}

SchedGroup(SchedGroupMask SGMask, Optional<unsigned> MaxSize, int SyncID,
SchedGroup(SchedGroupMask SGMask, std::optional<unsigned> MaxSize, int SyncID,
ScheduleDAGInstrs *DAG, const SIInstrInfo *TII)
: SGMask(SGMask), MaxSize(MaxSize), SyncID(SyncID), DAG(DAG), TII(TII) {
SGID = NumSchedGroups++;
2 changes: 1 addition & 1 deletion llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -1904,7 +1904,7 @@ bool AMDGPUDAGToDAGISel::SelectSMRDOffset(SDValue ByteOffsetNode,
// GFX9 and GFX10 have signed byte immediate offsets. The immediate
// offset for S_BUFFER instructions is unsigned.
int64_t ByteOffset = IsBuffer ? C->getZExtValue() : C->getSExtValue();
Optional<int64_t> EncodedOffset =
std::optional<int64_t> EncodedOffset =
AMDGPU::getSMRDEncodedOffset(*Subtarget, ByteOffset, IsBuffer);
if (EncodedOffset && Offset && !Imm32Only) {
*Offset = CurDAG->getTargetConstant(*EncodedOffset, SL, MVT::i32);
37 changes: 19 additions & 18 deletions llvm/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -144,7 +144,7 @@ bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
const TargetRegisterClass *SrcRC
= TRI.getConstrainedRegClassForOperand(Src, *MRI);

Optional<ValueAndVReg> ConstVal =
std::optional<ValueAndVReg> ConstVal =
getIConstantVRegValWithLookThrough(SrcReg, *MRI, true);
if (ConstVal) {
unsigned MovOpc =
@@ -975,7 +975,7 @@ bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {

auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);

Optional<ValueAndVReg> ConstSelect =
std::optional<ValueAndVReg> ConstSelect =
getIConstantVRegValWithLookThrough(LaneSelect, *MRI);
if (ConstSelect) {
// The selector has to be an inline immediate, so we can use whatever for
@@ -984,7 +984,7 @@
MIB.addImm(ConstSelect->Value.getSExtValue() &
maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
} else {
Optional<ValueAndVReg> ConstVal =
std::optional<ValueAndVReg> ConstVal =
getIConstantVRegValWithLookThrough(Val, *MRI);

// If the value written is an inline immediate, we can get away without a
@@ -1383,7 +1383,7 @@ bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
if (Size != STI.getWavefrontSize())
return false;

Optional<ValueAndVReg> Arg =
std::optional<ValueAndVReg> Arg =
getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);

if (Arg) {
@@ -3082,7 +3082,7 @@ bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const {
}

Register VOffset = MI.getOperand(4 + OpOffset).getReg();
Optional<ValueAndVReg> MaybeVOffset =
std::optional<ValueAndVReg> MaybeVOffset =
getIConstantVRegValWithLookThrough(VOffset, *MRI);
const bool HasVOffset = !MaybeVOffset || MaybeVOffset->Value.getZExtValue();

@@ -3784,7 +3784,7 @@ bool AMDGPUInstructionSelector::selectSmrdOffset(MachineOperand &Root,
return false;

const GEPInfo &GEPI = AddrInfo[0];
Optional<int64_t> EncodedImm =
std::optional<int64_t> EncodedImm =
AMDGPU::getSMRDEncodedOffset(STI, GEPI.Imm, false);

if (SOffset && Offset) {
@@ -3856,7 +3856,7 @@ AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {

const GEPInfo &GEPInfo = AddrInfo[0];
Register PtrReg = GEPInfo.SgprParts[0];
Optional<int64_t> EncodedImm =
std::optional<int64_t> EncodedImm =
AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
if (!EncodedImm)
return std::nullopt;
@@ -4293,7 +4293,8 @@ bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
unsigned ShAmtBits) const {
assert(MI.getOpcode() == TargetOpcode::G_AND);

Optional<APInt> RHS = getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
std::optional<APInt> RHS =
getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
if (!RHS)
return false;

@@ -4477,7 +4478,7 @@ AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
return {Root, 0};

MachineOperand &RHS = RootI->getOperand(2);
Optional<ValueAndVReg> MaybeOffset =
std::optional<ValueAndVReg> MaybeOffset =
getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
if (!MaybeOffset)
return {Root, 0};
@@ -4804,22 +4805,22 @@ AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
}

/// Get an immediate that must be 32-bits, and treated as zero extended.
static Optional<uint64_t> getConstantZext32Val(Register Reg,
const MachineRegisterInfo &MRI) {
static std::optional<uint64_t>
getConstantZext32Val(Register Reg, const MachineRegisterInfo &MRI) {
// getIConstantVRegVal sexts any values, so see if that matters.
Optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
std::optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
if (!OffsetVal || !isInt<32>(*OffsetVal))
return std::nullopt;
return Lo_32(*OffsetVal);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
std::optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
if (!OffsetVal)
return {};

Optional<int64_t> EncodedImm =
std::optional<int64_t> EncodedImm =
AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
if (!EncodedImm)
return {};
@@ -4831,12 +4832,12 @@ InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);

Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
std::optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
if (!OffsetVal)
return {};

Optional<int64_t> EncodedImm
= AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
std::optional<int64_t> EncodedImm =
AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
if (!EncodedImm)
return {};

@@ -4854,7 +4855,7 @@ AMDGPUInstructionSelector::selectSMRDBufferSgprImm(MachineOperand &Root) const {
if (!SOffset)
return std::nullopt;

Optional<int64_t> EncodedOffset =
std::optional<int64_t> EncodedOffset =
AMDGPU::getSMRDEncodedOffset(STI, Offset, /* IsBuffer */ true);
if (!EncodedOffset)
return std::nullopt;
14 changes: 7 additions & 7 deletions llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -2340,7 +2340,7 @@ bool AMDGPULegalizerInfo::legalizeExtractVectorElt(
// FIXME: Artifact combiner probably should have replaced the truncated
// constant before this, so we shouldn't need
// getIConstantVRegValWithLookThrough.
Optional<ValueAndVReg> MaybeIdxVal =
std::optional<ValueAndVReg> MaybeIdxVal =
getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
if (!MaybeIdxVal) // Dynamic case will be selected to register indexing.
return true;
@@ -2374,7 +2374,7 @@ bool AMDGPULegalizerInfo::legalizeInsertVectorElt(
// FIXME: Artifact combiner probably should have replaced the truncated
// constant before this, so we shouldn't need
// getIConstantVRegValWithLookThrough.
Optional<ValueAndVReg> MaybeIdxVal =
std::optional<ValueAndVReg> MaybeIdxVal =
getIConstantVRegValWithLookThrough(MI.getOperand(3).getReg(), MRI);
if (!MaybeIdxVal) // Dynamic case will be selected to register indexing.
return true;
@@ -4188,7 +4188,7 @@ bool AMDGPULegalizerInfo::getLDSKernelId(Register DstReg,
MachineRegisterInfo &MRI,
MachineIRBuilder &B) const {
Function &F = B.getMF().getFunction();
Optional<uint32_t> KnownSize =
std::optional<uint32_t> KnownSize =
AMDGPUMachineFunction::getLDSKernelIdMetadata(F);
if (KnownSize.has_value())
B.buildConstant(DstReg, KnownSize.value());
@@ -4282,11 +4282,11 @@ void AMDGPULegalizerInfo::updateBufferMMO(MachineMemOperand *MMO,
Register VOffset, Register SOffset,
unsigned ImmOffset, Register VIndex,
MachineRegisterInfo &MRI) const {
Optional<ValueAndVReg> MaybeVOffsetVal =
std::optional<ValueAndVReg> MaybeVOffsetVal =
getIConstantVRegValWithLookThrough(VOffset, MRI);
Optional<ValueAndVReg> MaybeSOffsetVal =
std::optional<ValueAndVReg> MaybeSOffsetVal =
getIConstantVRegValWithLookThrough(SOffset, MRI);
Optional<ValueAndVReg> MaybeVIndexVal =
std::optional<ValueAndVReg> MaybeVIndexVal =
getIConstantVRegValWithLookThrough(VIndex, MRI);
// If the combined VOffset + SOffset + ImmOffset + strided VIndex is constant,
// update the MMO with that offset. The stride is unknown so we can only do
@@ -5288,7 +5288,7 @@ bool AMDGPULegalizerInfo::legalizeTrapIntrinsic(MachineInstr &MI,
ST.getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA)
return legalizeTrapEndpgm(MI, MRI, B);

if (Optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(&ST)) {
if (std::optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(&ST)) {
switch (*HsaAbiVer) {
case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
2 changes: 1 addition & 1 deletion llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.cpp
@@ -196,7 +196,7 @@ void AMDGPUMachineFunction::allocateKnownAddressLDSGlobal(const Function &F) {
}
}

Optional<uint32_t>
std::optional<uint32_t>
AMDGPUMachineFunction::getLDSKernelIdMetadata(const Function &F) {
auto MD = F.getMetadata("llvm.amdgcn.lds.kernel.id");
if (MD && MD->getNumOperands() == 1) {
2 changes: 1 addition & 1 deletion llvm/lib/Target/AMDGPU/AMDGPUMachineFunction.h
@@ -116,7 +116,7 @@ class AMDGPUMachineFunction : public MachineFunctionInfo {
static bool isKnownAddressLDSGlobal(const GlobalVariable &GV);
static unsigned calculateKnownAddressOfLDSGlobal(const GlobalVariable &GV);

static Optional<uint32_t> getLDSKernelIdMetadata(const Function &F);
static std::optional<uint32_t> getLDSKernelIdMetadata(const Function &F);

Align getDynLDSAlign() const { return DynLDSAlign; }

6 changes: 4 additions & 2 deletions llvm/lib/Target/AMDGPU/AMDGPUMachineModuleInfo.h
@@ -48,7 +48,8 @@ class AMDGPUMachineModuleInfo final : public MachineModuleInfoELF {
///
/// \returns \p SSID's inclusion ordering, or "std::nullopt" if \p SSID is not
/// supported by the AMDGPU target.
Optional<uint8_t> getSyncScopeInclusionOrdering(SyncScope::ID SSID) const {
std::optional<uint8_t>
getSyncScopeInclusionOrdering(SyncScope::ID SSID) const {
if (SSID == SyncScope::SingleThread ||
SSID == getSingleThreadOneAddressSpaceSSID())
return 0;
@@ -122,7 +123,8 @@ class AMDGPUMachineModuleInfo final : public MachineModuleInfoELF {
/// synchronization scope \p B, false if synchronization scope \p A is smaller
/// than synchronization scope \p B, or "std::nullopt" if either
/// synchronization scope \p A or \p B is not supported by the AMDGPU target.
Optional<bool> isSyncScopeInclusion(SyncScope::ID A, SyncScope::ID B) const {
std::optional<bool> isSyncScopeInclusion(SyncScope::ID A,
SyncScope::ID B) const {
const auto &AIO = getSyncScopeInclusionOrdering(A);
const auto &BIO = getSyncScopeInclusionOrdering(B);
if (!AIO || !BIO)
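isSyncScopeInclusion is a small example of composing optional queries: it is built on two getSyncScopeInclusionOrdering lookups, and either missing input makes the combined answer missing. Stripped of the AMDGPU types, the shape is roughly this (all names here are invented):

    #include <cstdint>
    #include <optional>

    std::optional<uint8_t> ordering(int Scope) {
      if (Scope < 0 || Scope > 3)
        return std::nullopt; // unsupported scope
      return static_cast<uint8_t>(Scope);
    }

    std::optional<bool> isInclusion(int A, int B) {
      const std::optional<uint8_t> AO = ordering(A);
      const std::optional<uint8_t> BO = ordering(B);
      if (!AO || !BO)
        return std::nullopt; // either scope unsupported: no answer
      return *BO <= *AO;     // A includes B if B orders no higher than A
    }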
9 changes: 5 additions & 4 deletions llvm/lib/Target/AMDGPU/AMDGPUPropagateAttributes.cpp
@@ -91,7 +91,7 @@ class AMDGPUPropagateAttributes {
}

FeatureBitset Features;
Optional<Attribute> Attributes[NumAttr];
std::optional<Attribute> Attributes[NumAttr];
};

class Clone {
@@ -127,7 +127,8 @@ class AMDGPUPropagateAttributes {
void setFeatures(Function &F, const FeatureBitset &NewFeatures);

// Set new function's attributes in place.
void setAttributes(Function &F, const ArrayRef<Optional<Attribute>> NewAttrs);
void setAttributes(Function &F,
const ArrayRef<std::optional<Attribute>> NewAttrs);

std::string getFeatureString(const FeatureBitset &Features) const;

@@ -343,8 +344,8 @@ void AMDGPUPropagateAttributes::setFeatures(Function &F,
F.addFnAttr("target-features", NewFeatureStr);
}

void AMDGPUPropagateAttributes::setAttributes(Function &F,
const ArrayRef<Optional<Attribute>> NewAttrs) {
void AMDGPUPropagateAttributes::setAttributes(
Function &F, const ArrayRef<std::optional<Attribute>> NewAttrs) {
LLVM_DEBUG(dbgs() << "Set attributes on " << F.getName() << ":\n");
for (unsigned I = 0; I < NumAttr; ++I) {
F.removeFnAttr(AttributeNames[I]);
6 changes: 3 additions & 3 deletions llvm/lib/Target/AMDGPU/AMDGPURegBankCombiner.cpp
@@ -161,7 +161,7 @@ bool AMDGPURegBankCombinerHelper::matchIntMinMaxToMed3(

MinMaxMedOpc OpcodeTriple = getMinMaxPair(MI.getOpcode());
Register Val;
Optional<ValueAndVReg> K0, K1;
std::optional<ValueAndVReg> K0, K1;
// Match min(max(Val, K0), K1) or max(min(Val, K1), K0). Then see if K0 <= K1.
if (!matchMed<GCstAndRegMatch>(MI, MRI, OpcodeTriple, Val, K0, K1))
return false;
@@ -206,7 +206,7 @@ bool AMDGPURegBankCombinerHelper::matchFPMinMaxToMed3(
auto OpcodeTriple = getMinMaxPair(MI.getOpcode());

Register Val;
Optional<FPValueAndVReg> K0, K1;
std::optional<FPValueAndVReg> K0, K1;
// Match min(max(Val, K0), K1) or max(min(Val, K1), K0). Then see if K0 <= K1.
if (!matchMed<GFCstAndRegMatch>(MI, MRI, OpcodeTriple, Val, K0, K1))
return false;
@@ -238,7 +238,7 @@ bool AMDGPURegBankCombinerHelper::matchFPMinMaxToClamp(MachineInstr &MI,
// Clamp is available on all types after regbankselect (f16, f32, f64, v2f16).
auto OpcodeTriple = getMinMaxPair(MI.getOpcode());
Register Val;
Optional<FPValueAndVReg> K0, K1;
std::optional<FPValueAndVReg> K0, K1;
// Match min(max(Val, K0), K1) or max(min(Val, K1), K0).
if (!matchMed<GFCstOrSplatGFCstMatch>(MI, MRI, OpcodeTriple, Val, K0, K1))
return false;
3 changes: 2 additions & 1 deletion llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -1252,7 +1252,8 @@ static unsigned setBufferOffsets(MachineIRBuilder &B,
const LLT S32 = LLT::scalar(32);
MachineRegisterInfo *MRI = B.getMRI();

if (Optional<int64_t> Imm = getIConstantVRegSExtVal(CombinedOffset, *MRI)) {
if (std::optional<int64_t> Imm =
getIConstantVRegSExtVal(CombinedOffset, *MRI)) {
uint32_t SOffset, ImmOffset;
if (AMDGPU::splitMUBUFOffset(*Imm, SOffset, ImmOffset, &RBI.Subtarget,
Alignment)) {
25 changes: 13 additions & 12 deletions llvm/lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp
@@ -1283,10 +1283,10 @@ class AMDGPUAsmParser : public MCTargetAsmParser {
/// \param SGPRBlocks [out] Result SGPR block count.
bool calculateGPRBlocks(const FeatureBitset &Features, bool VCCUsed,
bool FlatScrUsed, bool XNACKUsed,
Optional<bool> EnableWavefrontSize32, unsigned NextFreeVGPR,
SMRange VGPRRange, unsigned NextFreeSGPR,
SMRange SGPRRange, unsigned &VGPRBlocks,
unsigned &SGPRBlocks);
std::optional<bool> EnableWavefrontSize32,
unsigned NextFreeVGPR, SMRange VGPRRange,
unsigned NextFreeSGPR, SMRange SGPRRange,
unsigned &VGPRBlocks, unsigned &SGPRBlocks);
bool ParseDirectiveAMDGCNTarget();
bool ParseDirectiveAMDHSAKernel();
bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
@@ -1334,7 +1334,7 @@ class AMDGPUAsmParser : public MCTargetAsmParser {

bool isRegister();
bool isRegister(const AsmToken &Token, const AsmToken &NextToken) const;
Optional<StringRef> getGprCountSymbolName(RegisterKind RegKind);
std::optional<StringRef> getGprCountSymbolName(RegisterKind RegKind);
void initializeGprCountSymbol(RegisterKind RegKind);
bool updateGprCountSymbols(RegisterKind RegKind, unsigned DwordRegIndex,
unsigned RegWidth);
@@ -1667,7 +1667,7 @@ class AMDGPUAsmParser : public MCTargetAsmParser {
const SMLoc &IDLoc);
bool validateExeczVcczOperands(const OperandVector &Operands);
bool validateTFE(const MCInst &Inst, const OperandVector &Operands);
Optional<StringRef> validateLdsDirect(const MCInst &Inst);
std::optional<StringRef> validateLdsDirect(const MCInst &Inst);
unsigned getConstantBusLimit(unsigned Opcode) const;
bool usesConstantBus(const MCInst &Inst, unsigned OpIdx);
bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const;
@@ -2853,7 +2853,7 @@ bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
return false;
}

Optional<StringRef>
std::optional<StringRef>
AMDGPUAsmParser::getGprCountSymbolName(RegisterKind RegKind) {
switch (RegKind) {
case IS_VGPR:
@@ -4056,7 +4056,8 @@ static bool IsRevOpcode(const unsigned Opcode)
}
}

Optional<StringRef> AMDGPUAsmParser::validateLdsDirect(const MCInst &Inst) {
std::optional<StringRef>
AMDGPUAsmParser::validateLdsDirect(const MCInst &Inst) {

using namespace SIInstrFlags;
const unsigned Opcode = Inst.getOpcode();
@@ -4914,9 +4915,9 @@ bool AMDGPUAsmParser::OutOfRangeError(SMRange Range) {

bool AMDGPUAsmParser::calculateGPRBlocks(
const FeatureBitset &Features, bool VCCUsed, bool FlatScrUsed,
bool XNACKUsed, Optional<bool> EnableWavefrontSize32, unsigned NextFreeVGPR,
SMRange VGPRRange, unsigned NextFreeSGPR, SMRange SGPRRange,
unsigned &VGPRBlocks, unsigned &SGPRBlocks) {
bool XNACKUsed, std::optional<bool> EnableWavefrontSize32,
unsigned NextFreeVGPR, SMRange VGPRRange, unsigned NextFreeSGPR,
SMRange SGPRRange, unsigned &VGPRBlocks, unsigned &SGPRBlocks) {
// TODO(scott.linder): These calculations are duplicated from
// AMDGPUAsmPrinter::getSIProgramInfo and could be unified.
IsaVersion Version = getIsaVersion(getSTI().getCPU());
@@ -4984,7 +4985,7 @@ bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
std::optional<unsigned> ExplicitUserSGPRCount;
bool ReserveVCC = true;
bool ReserveFlatScr = true;
Optional<bool> EnableWavefrontSize32;
std::optional<bool> EnableWavefrontSize32;

while (true) {
while (trySkipToken(AsmToken::EndOfStatement));
4 changes: 2 additions & 2 deletions llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp
@@ -407,7 +407,7 @@ void AMDGPUTargetAsmStreamer::EmitAmdhsaKernelDescriptor(
if (IVersion.Major >= 7 && !ReserveFlatScr && !hasArchitectedFlatScratch(STI))
OS << "\t\t.amdhsa_reserve_flat_scratch " << ReserveFlatScr << '\n';

if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(&STI)) {
if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(&STI)) {
switch (*HsaAbiVer) {
default:
break;
@@ -594,7 +594,7 @@ unsigned AMDGPUTargetELFStreamer::getEFlagsUnknownOS() {
unsigned AMDGPUTargetELFStreamer::getEFlagsAMDHSA() {
assert(STI.getTargetTriple().getOS() == Triple::AMDHSA);

if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(&STI)) {
if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(&STI)) {
switch (*HsaAbiVer) {
case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
6 changes: 3 additions & 3 deletions llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.h
@@ -36,7 +36,7 @@ class AMDGPUTargetStreamer : public MCTargetStreamer {

protected:
// TODO: Move HSAMetadataStream to AMDGPUTargetStreamer.
Optional<AMDGPU::IsaInfo::AMDGPUTargetID> TargetID;
std::optional<AMDGPU::IsaInfo::AMDGPUTargetID> TargetID;

MCContext &getContext() const { return Streamer.getContext(); }

@@ -98,10 +98,10 @@ class AMDGPUTargetStreamer : public MCTargetStreamer {
static StringRef getArchNameFromElfMach(unsigned ElfMach);
static unsigned getElfMach(StringRef GPU);

const Optional<AMDGPU::IsaInfo::AMDGPUTargetID> &getTargetID() const {
const std::optional<AMDGPU::IsaInfo::AMDGPUTargetID> &getTargetID() const {
return TargetID;
}
Optional<AMDGPU::IsaInfo::AMDGPUTargetID> &getTargetID() {
std::optional<AMDGPU::IsaInfo::AMDGPUTargetID> &getTargetID() {
return TargetID;
}
void initializeTargetID(const MCSubtargetInfo &STI) {
10 changes: 5 additions & 5 deletions llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -59,7 +59,7 @@ static MCRegister findScratchNonCalleeSaveRegister(MachineRegisterInfo &MRI,
static void getVGPRSpillLaneOrTempRegister(MachineFunction &MF,
LivePhysRegs &LiveRegs,
Register &TempSGPR,
Optional<int> &FrameIndex,
std::optional<int> &FrameIndex,
bool IsFP) {
SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
MachineFrameInfo &FrameInfo = MF.getFrameInfo();
@@ -773,8 +773,8 @@ void SIFrameLowering::emitPrologue(MachineFunction &MF,
// turn on all lanes before doing the spill to memory.
Register ScratchExecCopy;

Optional<int> FPSaveIndex = FuncInfo->FramePointerSaveIndex;
Optional<int> BPSaveIndex = FuncInfo->BasePointerSaveIndex;
std::optional<int> FPSaveIndex = FuncInfo->FramePointerSaveIndex;
std::optional<int> BPSaveIndex = FuncInfo->BasePointerSaveIndex;

// VGPRs used for SGPR->VGPR spills
for (const SIMachineFunctionInfo::SGPRSpillVGPR &Reg :
@@ -990,8 +990,8 @@ void SIFrameLowering::emitEpilogue(MachineFunction &MF,
const Register BasePtrReg =
TRI.hasBasePointer(MF) ? TRI.getBaseRegister() : Register();

Optional<int> FPSaveIndex = FuncInfo->FramePointerSaveIndex;
Optional<int> BPSaveIndex = FuncInfo->BasePointerSaveIndex;
std::optional<int> FPSaveIndex = FuncInfo->FramePointerSaveIndex;
std::optional<int> BPSaveIndex = FuncInfo->BasePointerSaveIndex;

if (RoundedSize != 0 && hasFP(MF)) {
auto Add = BuildMI(MBB, MBBI, DL, TII->get(AMDGPU::S_ADD_I32), StackPtrReg)
9 changes: 5 additions & 4 deletions llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1702,7 +1702,7 @@ SDValue SITargetLowering::getLDSKernelId(SelectionDAG &DAG,
const SDLoc &SL) const {

Function &F = DAG.getMachineFunction().getFunction();
Optional<uint32_t> KnownSize =
std::optional<uint32_t> KnownSize =
AMDGPUMachineFunction::getLDSKernelIdMetadata(F);
if (KnownSize.has_value())
return DAG.getConstant(KnownSize.value(), SL, MVT::i32);
@@ -2856,7 +2856,8 @@ void SITargetLowering::passSpecialInputs(
// input for kernels, and is computed from the kernarg segment pointer.
InputReg = getImplicitArgPtr(DAG, DL);
} else if (InputID == AMDGPUFunctionArgInfo::LDS_KERNEL_ID) {
Optional<uint32_t> Id = AMDGPUMachineFunction::getLDSKernelIdMetadata(F);
std::optional<uint32_t> Id =
AMDGPUMachineFunction::getLDSKernelIdMetadata(F);
if (Id.has_value()) {
InputReg = DAG.getConstant(Id.value(), DL, ArgVT);
} else {
@@ -5421,7 +5422,7 @@ SDValue SITargetLowering::lowerTRAP(SDValue Op, SelectionDAG &DAG) const {
Subtarget->getTrapHandlerAbi() != GCNSubtarget::TrapHandlerAbi::AMDHSA)
return lowerTrapEndpgm(Op, DAG);

if (Optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(Subtarget)) {
if (std::optional<uint8_t> HsaAbiVer = AMDGPU::getHsaAbiVersion(Subtarget)) {
switch (*HsaAbiVer) {
case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
@@ -10240,7 +10241,7 @@ bool SITargetLowering::isCanonicalized(Register Reg, MachineFunction &MF,
if (Opcode == AMDGPU::G_FCANONICALIZE)
return true;

Optional<FPValueAndVReg> FCR;
std::optional<FPValueAndVReg> FCR;
// Constant splat (can be padded with undef) or scalar constant.
if (mi_match(Reg, MRI, MIPatternMatch::m_GFCstOrSplat(FCR))) {
if (FCR->Value.isSignaling())
4 changes: 2 additions & 2 deletions llvm/lib/Target/AMDGPU/SILoadStoreOptimizer.cpp
@@ -263,7 +263,7 @@ class SILoadStoreOptimizer : public MachineFunctionPass {
int32_t NewOffset) const;
Register computeBase(MachineInstr &MI, const MemAddress &Addr) const;
MachineOperand createRegOrImm(int32_t Val, MachineInstr &MI) const;
Optional<int32_t> extractConstOffset(const MachineOperand &Op) const;
std::optional<int32_t> extractConstOffset(const MachineOperand &Op) const;
void processBaseWithConstOffset(const MachineOperand &Base, MemAddress &Addr) const;
/// Promotes constant offset to the immediate by adjusting the base. It
/// tries to use a base from the nearby instructions that allows it to have
@@ -1983,7 +1983,7 @@ void SILoadStoreOptimizer::updateBaseAndOffset(MachineInstr &MI,
TII->getNamedOperand(MI, AMDGPU::OpName::offset)->setImm(NewOffset);
}

Optional<int32_t>
std::optional<int32_t>
SILoadStoreOptimizer::extractConstOffset(const MachineOperand &Op) const {
if (Op.isImm())
return Op.getImm();
2 changes: 1 addition & 1 deletion llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -340,7 +340,7 @@ bool SIMachineFunctionInfo::allocateSGPRSpillToVGPR(MachineFunction &MF,
return false;
}

Optional<int> SpillFI;
std::optional<int> SpillFI;
// We need to preserve inactive lanes, so always save, even caller-save
// registers.
if (!isEntryFunction()) {
14 changes: 7 additions & 7 deletions llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
@@ -424,7 +424,7 @@ class SIMachineFunctionInfo final : public AMDGPUMachineFunction {
// Current recorded maximum possible occupancy.
unsigned Occupancy;

mutable Optional<bool> UsesAGPRs;
mutable std::optional<bool> UsesAGPRs;

MCPhysReg getNextUserSGPR() const;

@@ -437,9 +437,9 @@

// If the VGPR is used for SGPR spills in a non-entrypoint function, the
// stack slot used to save/restore it in the prolog/epilog.
Optional<int> FI;
std::optional<int> FI;

SGPRSpillVGPR(Register V, Optional<int> F) : VGPR(V), FI(F) {}
SGPRSpillVGPR(Register V, std::optional<int> F) : VGPR(V), FI(F) {}
};

struct VGPRSpillToAGPR {
@@ -483,7 +483,7 @@ class SIMachineFunctionInfo final : public AMDGPUMachineFunction {

// Emergency stack slot. Sometimes, we create this before finalizing the stack
// frame, so save it here and add it to the RegScavenger later.
Optional<int> ScavengeFI;
std::optional<int> ScavengeFI;

private:
Register VGPRForAGPRCopy;
@@ -501,12 +501,12 @@
/// If this is set, an SGPR used for save/restore of the register used for the
/// frame pointer.
Register SGPRForFPSaveRestoreCopy;
Optional<int> FramePointerSaveIndex;
std::optional<int> FramePointerSaveIndex;

/// If this is set, an SGPR used for save/restore of the register used for the
/// base pointer.
Register SGPRForBPSaveRestoreCopy;
Optional<int> BasePointerSaveIndex;
std::optional<int> BasePointerSaveIndex;

bool isCalleeSavedReg(const MCPhysReg *CSRegs, MCPhysReg Reg);

@@ -573,7 +573,7 @@ class SIMachineFunctionInfo final : public AMDGPUMachineFunction {
bool ResetSGPRSpillStackIDs);

int getScavengeFI(MachineFrameInfo &MFI, const SIRegisterInfo &TRI);
Optional<int> getOptionalScavengeFI() const { return ScavengeFI; }
std::optional<int> getOptionalScavengeFI() const { return ScavengeFI; }

unsigned getBytesInStackArgArea() const {
return BytesInStackArgArea;
40 changes: 20 additions & 20 deletions llvm/lib/Target/AMDGPU/SIMemoryLegalizer.cpp
@@ -221,40 +221,40 @@ class SIMemOpAccess final {
/// the SI atomic scope it corresponds to, the address spaces it
/// covers, and whether the memory ordering applies between address
/// spaces.
Optional<std::tuple<SIAtomicScope, SIAtomicAddrSpace, bool>>
std::optional<std::tuple<SIAtomicScope, SIAtomicAddrSpace, bool>>
toSIAtomicScope(SyncScope::ID SSID, SIAtomicAddrSpace InstrAddrSpace) const;

/// \return Return a bit set of the address spaces accessed by \p AS.
SIAtomicAddrSpace toSIAtomicAddrSpace(unsigned AS) const;

/// \returns Info constructed from \p MI, which has at least machine memory
/// operand.
Optional<SIMemOpInfo> constructFromMIWithMMO(
const MachineBasicBlock::iterator &MI) const;
std::optional<SIMemOpInfo>
constructFromMIWithMMO(const MachineBasicBlock::iterator &MI) const;

public:
/// Construct class to support accessing the machine memory operands
/// of instructions in the machine function \p MF.
SIMemOpAccess(MachineFunction &MF);

/// \returns Load info if \p MI is a load operation, "std::nullopt" otherwise.
Optional<SIMemOpInfo> getLoadInfo(
const MachineBasicBlock::iterator &MI) const;
std::optional<SIMemOpInfo>
getLoadInfo(const MachineBasicBlock::iterator &MI) const;

/// \returns Store info if \p MI is a store operation, "std::nullopt"
/// otherwise.
Optional<SIMemOpInfo> getStoreInfo(
const MachineBasicBlock::iterator &MI) const;
std::optional<SIMemOpInfo>
getStoreInfo(const MachineBasicBlock::iterator &MI) const;

/// \returns Atomic fence info if \p MI is an atomic fence operation,
/// "std::nullopt" otherwise.
Optional<SIMemOpInfo> getAtomicFenceInfo(
const MachineBasicBlock::iterator &MI) const;
std::optional<SIMemOpInfo>
getAtomicFenceInfo(const MachineBasicBlock::iterator &MI) const;

/// \returns Atomic cmpxchg/rmw info if \p MI is an atomic cmpxchg or
/// rmw operation, "std::nullopt" otherwise.
Optional<SIMemOpInfo> getAtomicCmpxchgOrRmwInfo(
const MachineBasicBlock::iterator &MI) const;
std::optional<SIMemOpInfo>
getAtomicCmpxchgOrRmwInfo(const MachineBasicBlock::iterator &MI) const;
};

class SICacheControl {
@@ -622,7 +622,7 @@ void SIMemOpAccess::reportUnsupported(const MachineBasicBlock::iterator &MI,
Func.getContext().diagnose(Diag);
}

Optional<std::tuple<SIAtomicScope, SIAtomicAddrSpace, bool>>
std::optional<std::tuple<SIAtomicScope, SIAtomicAddrSpace, bool>>
SIMemOpAccess::toSIAtomicScope(SyncScope::ID SSID,
SIAtomicAddrSpace InstrAddrSpace) const {
if (SSID == SyncScope::System)
@@ -687,7 +687,7 @@ SIMemOpAccess::SIMemOpAccess(MachineFunction &MF) {
MMI = &MF.getMMI().getObjFileInfo<AMDGPUMachineModuleInfo>();
}

Optional<SIMemOpInfo> SIMemOpAccess::constructFromMIWithMMO(
std::optional<SIMemOpInfo> SIMemOpAccess::constructFromMIWithMMO(
const MachineBasicBlock::iterator &MI) const {
assert(MI->getNumMemOperands() > 0);

@@ -747,8 +747,8 @@ Optional<SIMemOpInfo> SIMemOpAccess::constructFromMIWithMMO(
IsNonTemporal);
}

Optional<SIMemOpInfo> SIMemOpAccess::getLoadInfo(
const MachineBasicBlock::iterator &MI) const {
std::optional<SIMemOpInfo>
SIMemOpAccess::getLoadInfo(const MachineBasicBlock::iterator &MI) const {
assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

if (!(MI->mayLoad() && !MI->mayStore()))
@@ -761,8 +761,8 @@ Optional<SIMemOpInfo> SIMemOpAccess::getLoadInfo(
return constructFromMIWithMMO(MI);
}

Optional<SIMemOpInfo> SIMemOpAccess::getStoreInfo(
const MachineBasicBlock::iterator &MI) const {
std::optional<SIMemOpInfo>
SIMemOpAccess::getStoreInfo(const MachineBasicBlock::iterator &MI) const {
assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

if (!(!MI->mayLoad() && MI->mayStore()))
@@ -775,8 +775,8 @@ Optional<SIMemOpInfo> SIMemOpAccess::getStoreInfo(
return constructFromMIWithMMO(MI);
}

Optional<SIMemOpInfo> SIMemOpAccess::getAtomicFenceInfo(
const MachineBasicBlock::iterator &MI) const {
std::optional<SIMemOpInfo>
SIMemOpAccess::getAtomicFenceInfo(const MachineBasicBlock::iterator &MI) const {
assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

if (MI->getOpcode() != AMDGPU::ATOMIC_FENCE)
@@ -808,7 +808,7 @@ Optional<SIMemOpInfo> SIMemOpAccess::getAtomicFenceInfo(
IsCrossAddressSpaceOrdering, AtomicOrdering::NotAtomic);
}

Optional<SIMemOpInfo> SIMemOpAccess::getAtomicCmpxchgOrRmwInfo(
std::optional<SIMemOpInfo> SIMemOpAccess::getAtomicCmpxchgOrRmwInfo(
const MachineBasicBlock::iterator &MI) const {
assert(MI->getDesc().TSFlags & SIInstrFlags::maybeAtomic);

5 changes: 3 additions & 2 deletions llvm/lib/Target/AMDGPU/SIPeepholeSDWA.cpp
@@ -53,7 +53,7 @@ class SIPeepholeSDWA : public MachineFunctionPass {
MapVector<MachineInstr *, SDWAOperandsVector> PotentialMatches;
SmallVector<MachineInstr *, 8> ConvertedInstructions;

Optional<int64_t> foldToImm(const MachineOperand &Op) const;
std::optional<int64_t> foldToImm(const MachineOperand &Op) const;

public:
static char ID;
@@ -490,7 +490,8 @@ bool SDWADstPreserveOperand::convertToSDWA(MachineInstr &MI,
return SDWADstOperand::convertToSDWA(MI, TII);
}

Optional<int64_t> SIPeepholeSDWA::foldToImm(const MachineOperand &Op) const {
std::optional<int64_t>
SIPeepholeSDWA::foldToImm(const MachineOperand &Op) const {
if (Op.isImm()) {
return Op.getImm();
}
35 changes: 18 additions & 17 deletions llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -98,7 +98,7 @@ namespace llvm {

namespace AMDGPU {

Optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) {
std::optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) {
if (STI && STI->getTargetTriple().getOS() != Triple::AMDHSA)
return std::nullopt;

@@ -118,25 +118,25 @@ Optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) {
}

bool isHsaAbiVersion2(const MCSubtargetInfo *STI) {
if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V2;
return false;
}

bool isHsaAbiVersion3(const MCSubtargetInfo *STI) {
if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V3;
return false;
}

bool isHsaAbiVersion4(const MCSubtargetInfo *STI) {
if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V4;
return false;
}

bool isHsaAbiVersion5(const MCSubtargetInfo *STI) {
if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
if (std::optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V5;
return false;
}
@@ -536,7 +536,7 @@ unsigned ComponentInfo::getIndexInParsedOperands(unsigned CompOprIdx) const {
return 0;
}

Optional<unsigned> InstInfo::getInvalidCompOperandIndex(
std::optional<unsigned> InstInfo::getInvalidCompOperandIndex(
std::function<unsigned(unsigned, unsigned)> GetRegIdx) const {

auto OpXRegs = getRegIndices(ComponentIndex::X, GetRegIdx);
@@ -711,7 +711,7 @@ std::string AMDGPUTargetID::toString() const {
.str();

std::string Features;
if (Optional<uint8_t> HsaAbiVersion = getHsaAbiVersion(&STI)) {
if (std::optional<uint8_t> HsaAbiVersion = getHsaAbiVersion(&STI)) {
switch (*HsaAbiVersion) {
case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
// Code object V2 only supported specific processors and had fixed
@@ -972,7 +972,7 @@ unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
}

unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
Optional<bool> EnableWavefrontSize32) {
std::optional<bool> EnableWavefrontSize32) {
if (STI->getFeatureBits().test(FeatureGFX90AInsts))
return 8;

@@ -990,7 +990,7 @@ unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
}

unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
Optional<bool> EnableWavefrontSize32) {
std::optional<bool> EnableWavefrontSize32) {
if (STI->getFeatureBits().test(FeatureGFX90AInsts))
return 8;

@@ -1062,7 +1062,7 @@ unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
}

unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
Optional<bool> EnableWavefrontSize32) {
std::optional<bool> EnableWavefrontSize32) {
NumVGPRs = alignTo(std::max(1u, NumVGPRs),
getVGPREncodingGranule(STI, EnableWavefrontSize32));
// VGPRBlocks is actual number of VGPR blocks minus 1.
@@ -2489,30 +2489,31 @@ uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST,
return ByteOffset >> 2;
}

Optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
int64_t ByteOffset, bool IsBuffer) {
std::optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
int64_t ByteOffset, bool IsBuffer) {
// The signed version is always a byte offset.
if (!IsBuffer && hasSMRDSignedImmOffset(ST)) {
assert(hasSMEMByteOffset(ST));
return isInt<20>(ByteOffset) ? Optional<int64_t>(ByteOffset) : std::nullopt;
return isInt<20>(ByteOffset) ? std::optional<int64_t>(ByteOffset)
: std::nullopt;
}

if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST))
return std::nullopt;

int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset)
? Optional<int64_t>(EncodedOffset)
? std::optional<int64_t>(EncodedOffset)
: std::nullopt;
}

Optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
int64_t ByteOffset) {
std::optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
int64_t ByteOffset) {
if (!isCI(ST) || !isDwordAligned(ByteOffset))
return std::nullopt;

int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
return isUInt<32>(EncodedOffset) ? Optional<int64_t>(EncodedOffset)
return isUInt<32>(EncodedOffset) ? std::optional<int64_t>(EncodedOffset)
: std::nullopt;
}

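The getSMRDEncodedOffset hunks preserve a subtlety that outlives the rename: in a conditional expression the success arm must be wrapped explicitly, because int64_t and std::nullopt_t share no common type for the ternary to converge on. A tiny standalone illustration (the 256 limit is arbitrary):

    #include <cstdint>
    #include <optional>

    std::optional<int64_t> encodeIfSmall(int64_t Offset) {
      // Writing `Offset : std::nullopt` would not compile: the two arms
      // of ?: cannot be reconciled without the explicit optional wrap.
      return Offset < 256 ? std::optional<int64_t>(Offset) : std::nullopt;
    }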
27 changes: 14 additions & 13 deletions llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -42,7 +42,7 @@ namespace AMDGPU {
struct IsaVersion;

/// \returns HSA OS ABI Version identification.
Optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI);
std::optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI);
/// \returns True if HSA OS ABI Version identification is 2,
/// false otherwise.
bool isHsaAbiVersion2(const MCSubtargetInfo *STI);
@@ -264,15 +264,15 @@ unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs);
/// the ENABLE_WAVEFRONT_SIZE32 kernel descriptor field.
unsigned
getVGPRAllocGranule(const MCSubtargetInfo *STI,
Optional<bool> EnableWavefrontSize32 = std::nullopt);
std::optional<bool> EnableWavefrontSize32 = std::nullopt);

/// \returns VGPR encoding granularity for given subtarget \p STI.
///
/// For subtargets which support it, \p EnableWavefrontSize32 should match
/// the ENABLE_WAVEFRONT_SIZE32 kernel descriptor field.
unsigned
getVGPREncodingGranule(const MCSubtargetInfo *STI,
Optional<bool> EnableWavefrontSize32 = std::nullopt);
unsigned getVGPREncodingGranule(
const MCSubtargetInfo *STI,
std::optional<bool> EnableWavefrontSize32 = std::nullopt);
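// The std::optional<bool> parameter is deliberately tri-state: pass true or
// false when the kernel descriptor's ENABLE_WAVEFRONT_SIZE32 field is known,
// or leave it unset to let the implementation fall back to the subtarget's
// own wavefront-size feature. A hypothetical caller (STI stands for any
// valid MCSubtargetInfo pointer):
//
//   unsigned GranDefault = getVGPREncodingGranule(STI);       // unset
//   unsigned GranWave32 = getVGPREncodingGranule(STI, true);  // wave32
//   unsigned GranWave64 = getVGPREncodingGranule(STI, false); // wave64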

/// \returns Total number of VGPRs for given subtarget \p STI.
unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI);
@@ -298,8 +298,9 @@ unsigned getNumWavesPerEUWithNumVGPRs(const MCSubtargetInfo *STI,
///
/// For subtargets which support it, \p EnableWavefrontSize32 should match the
/// ENABLE_WAVEFRONT_SIZE32 kernel descriptor field.
unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
Optional<bool> EnableWavefrontSize32 = std::nullopt);
unsigned
getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
std::optional<bool> EnableWavefrontSize32 = std::nullopt);

} // end namespace IsaInfo

@@ -546,7 +547,7 @@ constexpr unsigned COMPONENTS_NUM = 2;
class ComponentProps {
private:
unsigned SrcOperandsNum = 0;
Optional<unsigned> MandatoryLiteralIdx;
std::optional<unsigned> MandatoryLiteralIdx;
bool HasSrc2Acc = false;

public:
@@ -738,7 +739,7 @@ class InstInfo {

// Check VOPD operands constraints.
// Return the index of an invalid component operand, if any.
Optional<unsigned> getInvalidCompOperandIndex(
std::optional<unsigned> getInvalidCompOperandIndex(
std::function<unsigned(unsigned, unsigned)> GetRegIdx) const;
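// A sketch of the intended call pattern (Info, GetRegIdx and the error
// handler are hypothetical): an empty result means the VOPD operand
// constraints hold; a populated one carries the offending operand index.
//
//   if (std::optional<unsigned> InvalidIdx =
//           Info.getInvalidCompOperandIndex(GetRegIdx))
//     reportInvalidCompOperand(*InvalidIdx);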

private:
@@ -1252,13 +1253,13 @@ uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST, uint64_t ByteOffset);
/// SMRD offset field, or std::nullopt if it won't fit. On GFX9 and GFX10
/// S_LOAD instructions have a signed offset, on other subtargets it is
/// unsigned. S_BUFFER has an unsigned offset for all subtargets.
Optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
int64_t ByteOffset, bool IsBuffer);
std::optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
int64_t ByteOffset, bool IsBuffer);

/// \return The encoding that can be used for a 32-bit literal offset in an SMRD
/// instruction. This is only useful on CI.
Optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
int64_t ByteOffset);
std::optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
int64_t ByteOffset);

/// For FLAT segment the offset must be positive;
/// MSB is ignored and forced to zero.
130 changes: 52 additions & 78 deletions llvm/unittests/CodeGen/GlobalISel/ConstantFoldingTest.cpp
@@ -80,158 +80,132 @@ TEST_F(AArch64GISelMITest, FoldBinOp) {
auto MIBFCst2 = B.buildFConstant(s32, 2.0);

// Test G_ADD folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGAddInt =
ConstantFoldBinOp(TargetOpcode::G_ADD, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
std::optional<APInt> FoldGAddInt = ConstantFoldBinOp(
TargetOpcode::G_ADD, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGAddInt.has_value());
EXPECT_EQ(25ULL, FoldGAddInt.value().getLimitedValue());
Optional<APInt> FoldGAddMix =
ConstantFoldBinOp(TargetOpcode::G_ADD, MIBCst1.getReg(0),
MIBFCst2.getReg(0), *MRI);
std::optional<APInt> FoldGAddMix = ConstantFoldBinOp(
TargetOpcode::G_ADD, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGAddMix.has_value());
EXPECT_EQ(1073741840ULL, FoldGAddMix.value().getLimitedValue());
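// The mixed-operand folds operate on the FP constant's IEEE-754 bit pattern:
// 2.0f is 0x40000000 (1073741824), so adding the integer constant (16, judging
// from the integer expectations) gives 0x40000010 == 1073741840. A standalone
// check of that bit-pattern assumption (C++20 std::bit_cast):
//
//   #include <bit>
//   #include <cstdint>
//   static_assert(std::bit_cast<std::uint32_t>(2.0f) == 0x40000000u);
//
// The same reading explains the G_ASHR mixed result further down:
// 0x40000000 >> 9 == 0x00200000 == 2097152.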

// Test G_AND folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGAndInt =
ConstantFoldBinOp(TargetOpcode::G_AND, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
std::optional<APInt> FoldGAndInt = ConstantFoldBinOp(
TargetOpcode::G_AND, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGAndInt.has_value());
EXPECT_EQ(0ULL, FoldGAndInt.value().getLimitedValue());
Optional<APInt> FoldGAndMix =
ConstantFoldBinOp(TargetOpcode::G_AND, MIBCst2.getReg(0),
MIBFCst1.getReg(0), *MRI);
std::optional<APInt> FoldGAndMix = ConstantFoldBinOp(
TargetOpcode::G_AND, MIBCst2.getReg(0), MIBFCst1.getReg(0), *MRI);
EXPECT_TRUE(FoldGAndMix.has_value());
EXPECT_EQ(1ULL, FoldGAndMix.value().getLimitedValue());

// Test G_ASHR folding Integer + Mixed cases
Optional<APInt> FoldGAShrInt =
ConstantFoldBinOp(TargetOpcode::G_ASHR, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
std::optional<APInt> FoldGAShrInt = ConstantFoldBinOp(
TargetOpcode::G_ASHR, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGAShrInt.has_value());
EXPECT_EQ(0ULL, FoldGAShrInt.value().getLimitedValue());
Optional<APInt> FoldGAShrMix =
ConstantFoldBinOp(TargetOpcode::G_ASHR, MIBFCst2.getReg(0),
MIBCst2.getReg(0), *MRI);
std::optional<APInt> FoldGAShrMix = ConstantFoldBinOp(
TargetOpcode::G_ASHR, MIBFCst2.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGAShrMix.has_value());
EXPECT_EQ(2097152ULL, FoldGAShrMix.value().getLimitedValue());

// Test G_LSHR folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGLShrInt =
ConstantFoldBinOp(TargetOpcode::G_LSHR, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
std::optional<APInt> FoldGLShrInt = ConstantFoldBinOp(
TargetOpcode::G_LSHR, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGLShrInt.has_value());
EXPECT_EQ(0ULL, FoldGLShrInt.value().getLimitedValue());
Optional<APInt> FoldGLShrMix =
ConstantFoldBinOp(TargetOpcode::G_LSHR, MIBFCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
std::optional<APInt> FoldGLShrMix = ConstantFoldBinOp(
TargetOpcode::G_LSHR, MIBFCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGLShrMix.has_value());
EXPECT_EQ(2080768ULL, FoldGLShrMix.value().getLimitedValue());

// Test G_MUL folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGMulInt =
ConstantFoldBinOp(TargetOpcode::G_MUL, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
std::optional<APInt> FoldGMulInt = ConstantFoldBinOp(
TargetOpcode::G_MUL, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGMulInt.has_value());
EXPECT_EQ(144ULL, FoldGMulInt.value().getLimitedValue());
Optional<APInt> FoldGMulMix =
ConstantFoldBinOp(TargetOpcode::G_MUL, MIBCst1.getReg(0),
MIBFCst2.getReg(0), *MRI);
std::optional<APInt> FoldGMulMix = ConstantFoldBinOp(
TargetOpcode::G_MUL, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGMulMix.has_value());
EXPECT_EQ(0ULL, FoldGMulMix.value().getLimitedValue());

// Test G_OR folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGOrInt =
ConstantFoldBinOp(TargetOpcode::G_OR, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
std::optional<APInt> FoldGOrInt = ConstantFoldBinOp(
TargetOpcode::G_OR, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGOrInt.has_value());
EXPECT_EQ(25ULL, FoldGOrInt.value().getLimitedValue());
Optional<APInt> FoldGOrMix =
ConstantFoldBinOp(TargetOpcode::G_OR, MIBCst1.getReg(0),
MIBFCst2.getReg(0), *MRI);
std::optional<APInt> FoldGOrMix = ConstantFoldBinOp(
TargetOpcode::G_OR, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGOrMix.has_value());
EXPECT_EQ(1073741840ULL, FoldGOrMix.value().getLimitedValue());

// Test G_SHL folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGShlInt =
ConstantFoldBinOp(TargetOpcode::G_SHL, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
std::optional<APInt> FoldGShlInt = ConstantFoldBinOp(
TargetOpcode::G_SHL, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGShlInt.has_value());
EXPECT_EQ(8192ULL, FoldGShlInt.value().getLimitedValue());
Optional<APInt> FoldGShlMix =
ConstantFoldBinOp(TargetOpcode::G_SHL, MIBCst1.getReg(0),
MIBFCst2.getReg(0), *MRI);
std::optional<APInt> FoldGShlMix = ConstantFoldBinOp(
TargetOpcode::G_SHL, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGShlMix.has_value());
EXPECT_EQ(0ULL, FoldGShlMix.value().getLimitedValue());

// Test G_SUB folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGSubInt =
ConstantFoldBinOp(TargetOpcode::G_SUB, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
std::optional<APInt> FoldGSubInt = ConstantFoldBinOp(
TargetOpcode::G_SUB, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGSubInt.has_value());
EXPECT_EQ(7ULL, FoldGSubInt.value().getLimitedValue());
Optional<APInt> FoldGSubMix =
ConstantFoldBinOp(TargetOpcode::G_SUB, MIBCst1.getReg(0),
MIBFCst2.getReg(0), *MRI);
std::optional<APInt> FoldGSubMix = ConstantFoldBinOp(
TargetOpcode::G_SUB, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGSubMix.has_value());
EXPECT_EQ(3221225488ULL, FoldGSubMix.value().getLimitedValue());

// Test G_XOR folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGXorInt =
ConstantFoldBinOp(TargetOpcode::G_XOR, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
std::optional<APInt> FoldGXorInt = ConstantFoldBinOp(
TargetOpcode::G_XOR, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGXorInt.has_value());
EXPECT_EQ(25ULL, FoldGXorInt.value().getLimitedValue());
Optional<APInt> FoldGXorMix =
ConstantFoldBinOp(TargetOpcode::G_XOR, MIBCst1.getReg(0),
MIBFCst2.getReg(0), *MRI);
std::optional<APInt> FoldGXorMix = ConstantFoldBinOp(
TargetOpcode::G_XOR, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGXorMix.has_value());
EXPECT_EQ(1073741840ULL, FoldGXorMix.value().getLimitedValue());

// Test G_UDIV folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGUdivInt =
ConstantFoldBinOp(TargetOpcode::G_UDIV, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
std::optional<APInt> FoldGUdivInt = ConstantFoldBinOp(
TargetOpcode::G_UDIV, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGUdivInt.has_value());
EXPECT_EQ(1ULL, FoldGUdivInt.value().getLimitedValue());
Optional<APInt> FoldGUdivMix =
ConstantFoldBinOp(TargetOpcode::G_UDIV, MIBCst1.getReg(0),
MIBFCst2.getReg(0), *MRI);
std::optional<APInt> FoldGUdivMix = ConstantFoldBinOp(
TargetOpcode::G_UDIV, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGUdivMix.has_value());
EXPECT_EQ(0ULL, FoldGUdivMix.value().getLimitedValue());

// Test G_SDIV folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGSdivInt =
ConstantFoldBinOp(TargetOpcode::G_SDIV, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
std::optional<APInt> FoldGSdivInt = ConstantFoldBinOp(
TargetOpcode::G_SDIV, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGSdivInt.has_value());
EXPECT_EQ(1ULL, FoldGSdivInt.value().getLimitedValue());
Optional<APInt> FoldGSdivMix =
ConstantFoldBinOp(TargetOpcode::G_SDIV, MIBCst1.getReg(0),
MIBFCst2.getReg(0), *MRI);
std::optional<APInt> FoldGSdivMix = ConstantFoldBinOp(
TargetOpcode::G_SDIV, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGSdivMix.has_value());
EXPECT_EQ(0ULL, FoldGSdivMix.value().getLimitedValue());

// Test G_UREM folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGUremInt =
ConstantFoldBinOp(TargetOpcode::G_UREM, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
std::optional<APInt> FoldGUremInt = ConstantFoldBinOp(
TargetOpcode::G_UREM, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGUremInt.has_value());
EXPECT_EQ(7ULL, FoldGUremInt.value().getLimitedValue());
Optional<APInt> FoldGUremMix =
ConstantFoldBinOp(TargetOpcode::G_UREM, MIBCst1.getReg(0),
MIBFCst2.getReg(0), *MRI);
std::optional<APInt> FoldGUremMix = ConstantFoldBinOp(
TargetOpcode::G_UREM, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGUremMix.has_value());
EXPECT_EQ(16ULL, FoldGUremMix.value().getLimitedValue());

// Test G_SREM folding Integer + Mixed Int-Float cases
Optional<APInt> FoldGSremInt =
ConstantFoldBinOp(TargetOpcode::G_SREM, MIBCst1.getReg(0),
MIBCst2.getReg(0), *MRI);
std::optional<APInt> FoldGSremInt = ConstantFoldBinOp(
TargetOpcode::G_SREM, MIBCst1.getReg(0), MIBCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGSremInt.has_value());
EXPECT_EQ(7ULL, FoldGSremInt.value().getLimitedValue());
Optional<APInt> FoldGSremMix =
ConstantFoldBinOp(TargetOpcode::G_SREM, MIBCst1.getReg(0),
MIBFCst2.getReg(0), *MRI);
std::optional<APInt> FoldGSremMix = ConstantFoldBinOp(
TargetOpcode::G_SREM, MIBCst1.getReg(0), MIBFCst2.getReg(0), *MRI);
EXPECT_TRUE(FoldGSremMix.has_value());
EXPECT_EQ(16ULL, FoldGSremMix.value().getLimitedValue());
}
8 changes: 4 additions & 4 deletions llvm/unittests/CodeGen/GlobalISel/PatternMatchTest.cpp
@@ -45,7 +45,7 @@ TEST_F(AArch64GISelMITest, MatchIntConstantRegister) {
if (!TM)
return;
auto MIBCst = B.buildConstant(LLT::scalar(64), 42);
Optional<ValueAndVReg> Src0;
std::optional<ValueAndVReg> Src0;
bool match = mi_match(MIBCst.getReg(0), *MRI, m_GCst(Src0));
EXPECT_TRUE(match);
EXPECT_EQ(Src0->VReg, MIBCst.getReg(0));
@@ -700,8 +700,8 @@ TEST_F(AArch64GISelMITest, MatchFPOrIntConst) {

Register IntOne = B.buildConstant(LLT::scalar(64), 1).getReg(0);
Register FPOne = B.buildFConstant(LLT::scalar(64), 1.0).getReg(0);
Optional<ValueAndVReg> ValReg;
Optional<FPValueAndVReg> FValReg;
std::optional<ValueAndVReg> ValReg;
std::optional<FPValueAndVReg> FValReg;

EXPECT_TRUE(mi_match(IntOne, *MRI, m_GCst(ValReg)));
EXPECT_EQ(IntOne, ValReg->VReg);
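// As exercised here, m_GCst binds only integer G_CONSTANTs into a
// std::optional<ValueAndVReg>, while m_GFCst binds G_FCONSTANTs into a
// std::optional<FPValueAndVReg>; a failed match leaves the optional empty.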
@@ -723,7 +723,7 @@ TEST_F(AArch64GISelMITest, MatchConstantSplat) {
Register FPOne = B.buildFConstant(s64, 1.0).getReg(0);
Register FPZero = B.buildFConstant(s64, 0.0).getReg(0);
Register Undef = B.buildUndef(s64).getReg(0);
Optional<FPValueAndVReg> FValReg;
std::optional<FPValueAndVReg> FValReg;

// GFCstOrSplatGFCstMatch allows undef as part of splat. Undef often comes
// from padding to legalize into available operation and then ignore added
30 changes: 15 additions & 15 deletions llvm/unittests/CodeGen/SelectionDAGAddressAnalysisTest.cpp
@@ -110,7 +110,7 @@ TEST_F(SelectionDAGAddressAnalysisTest, sameFrameObject) {
SDValue Index = DAG->getMemBasePlusOffset(FIPtr, Offset, Loc);
SDValue Store = DAG->getStore(DAG->getEntryNode(), Loc, Value, Index,
PtrInfo.getWithOffset(Offset));
Optional<int64_t> NumBytes = MemoryLocation::getSizeOrUnknown(
std::optional<int64_t> NumBytes = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store)->getMemoryVT().getStoreSize());
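// MemoryLocation::getSizeOrUnknown is expected to yield the fixed store size
// in bytes here, and std::nullopt for sizes it cannot represent (e.g.
// scalable vectors).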

bool IsAlias;
@@ -137,7 +137,7 @@ TEST_F(SelectionDAGAddressAnalysisTest, sameFrameObjectUnknownSize) {
// It may be unlikely that BaseIndexOffset::computeAliasing is called with an
// unset optional NumBytes, as in this test, but it would be confusing if the
// function reported IsAlias = false in that case.
Optional<int64_t> NumBytes;
std::optional<int64_t> NumBytes;
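// A default-constructed std::optional<int64_t> is empty, i.e. no size known.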

bool IsAlias;
bool IsValid = BaseIndexOffset::computeAliasing(
@@ -165,9 +165,9 @@ TEST_F(SelectionDAGAddressAnalysisTest, noAliasingFrameObjects) {
PtrInfo.getWithOffset(Offset0));
SDValue Store1 = DAG->getStore(DAG->getEntryNode(), Loc, Value, Index1,
PtrInfo.getWithOffset(Offset1));
Optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
std::optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize());
Optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
std::optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize());

bool IsAlias;
@@ -195,9 +195,9 @@ TEST_F(SelectionDAGAddressAnalysisTest, unknownSizeFrameObjects) {
DAG->getStore(DAG->getEntryNode(), Loc, Value, FIPtr, PtrInfo);
SDValue Store1 = DAG->getStore(DAG->getEntryNode(), Loc, Value, Index1,
MachinePointerInfo(PtrInfo.getAddrSpace()));
Optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
std::optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize());
Optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
std::optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize());

bool IsAlias;
Expand All @@ -220,15 +220,15 @@ TEST_F(SelectionDAGAddressAnalysisTest, globalWithFrameObject) {
SDValue Index = DAG->getMemBasePlusOffset(FIPtr, Offset, Loc);
SDValue Store = DAG->getStore(DAG->getEntryNode(), Loc, Value, Index,
PtrInfo.getWithOffset(Offset));
Optional<int64_t> NumBytes = MemoryLocation::getSizeOrUnknown(
std::optional<int64_t> NumBytes = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store)->getMemoryVT().getStoreSize());
EVT GTy = DAG->getTargetLoweringInfo().getValueType(DAG->getDataLayout(),
G->getType());
SDValue GValue = DAG->getConstant(0, Loc, GTy);
SDValue GAddr = DAG->getGlobalAddress(G, Loc, GTy);
SDValue GStore = DAG->getStore(DAG->getEntryNode(), Loc, GValue, GAddr,
MachinePointerInfo(G, 0));
Optional<int64_t> GNumBytes = MemoryLocation::getSizeOrUnknown(
std::optional<int64_t> GNumBytes = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(GStore)->getMemoryVT().getStoreSize());

bool IsAlias;
@@ -248,7 +248,7 @@ TEST_F(SelectionDAGAddressAnalysisTest, globalWithAliasedGlobal) {
SDValue GAddr = DAG->getGlobalAddress(G, Loc, GTy);
SDValue GStore = DAG->getStore(DAG->getEntryNode(), Loc, GValue, GAddr,
MachinePointerInfo(G, 0));
Optional<int64_t> GNumBytes = MemoryLocation::getSizeOrUnknown(
std::optional<int64_t> GNumBytes = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(GStore)->getMemoryVT().getStoreSize());

SDValue AliasedGValue = DAG->getConstant(1, Loc, GTy);
@@ -290,9 +290,9 @@ TEST_F(SelectionDAGAddressAnalysisTest, fixedSizeFrameObjectsWithinDiff) {
PtrInfo.getWithOffset(Offset0));
SDValue Store1 = DAG->getStore(DAG->getEntryNode(), Loc, Value1, Index1,
PtrInfo.getWithOffset(Offset1));
Optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
std::optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize());
Optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
std::optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize());

bool IsAlias;
@@ -331,9 +331,9 @@ TEST_F(SelectionDAGAddressAnalysisTest, fixedSizeFrameObjectsOutOfDiff) {
PtrInfo.getWithOffset(Offset0));
SDValue Store1 = DAG->getStore(DAG->getEntryNode(), Loc, Value1, Index1,
PtrInfo.getWithOffset(Offset1));
Optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
std::optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize());
Optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
std::optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize());

bool IsAlias;
@@ -365,9 +365,9 @@ TEST_F(SelectionDAGAddressAnalysisTest, twoFixedStackObjects) {
PtrInfo0.getWithOffset(Offset0));
SDValue Store1 = DAG->getStore(DAG->getEntryNode(), Loc, Value1, Index1,
PtrInfo1.getWithOffset(Offset0));
Optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
std::optional<int64_t> NumBytes0 = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store0)->getMemoryVT().getStoreSize());
Optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
std::optional<int64_t> NumBytes1 = MemoryLocation::getSizeOrUnknown(
cast<StoreSDNode>(Store1)->getMemoryVT().getStoreSize());

bool IsAlias;