27 changes: 14 additions & 13 deletions mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -184,7 +184,8 @@ static void constifyIndexValues(
ofr.get<Attribute>().cast<IntegerAttr>().getInt());
continue;
}
Optional<int64_t> maybeConstant = getConstantIntValue(ofr.get<Value>());
std::optional<int64_t> maybeConstant =
getConstantIntValue(ofr.get<Value>());
if (maybeConstant)
ofr = builder.getIndexAttr(*maybeConstant);
}
@@ -458,7 +459,7 @@ ParseResult AllocaScopeOp::parse(OpAsmParser &parser, OperationState &result) {
}

void AllocaScopeOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
if (index) {
regions.push_back(RegionSuccessor(getResults()));
@@ -922,7 +923,7 @@ void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
build(builder, result, source, indexValue);
}

Optional<int64_t> DimOp::getConstantIndex() {
std::optional<int64_t> DimOp::getConstantIndex() {
return getConstantIntValue(getIndex());
}

@@ -942,7 +943,7 @@ Speculation::Speculatability DimOp::getSpeculatability() {

LogicalResult DimOp::verify() {
// Assume unknown index to be in range.
Optional<int64_t> index = getConstantIndex();
std::optional<int64_t> index = getConstantIndex();
if (!index)
return success();

@@ -977,7 +978,7 @@ static std::map<int64_t, unsigned> getNumOccurences(ArrayRef<int64_t> vals) {
/// This accounts for cases where there are multiple unit-dims, but only a
/// subset of those are dropped. For MemRefTypes these can be disambiguated
/// using the strides. If a dimension is dropped the stride must be dropped too.
static llvm::Optional<llvm::SmallBitVector>
static std::optional<llvm::SmallBitVector>
computeMemRefRankReductionMask(MemRefType originalType, MemRefType reducedType,
ArrayRef<OpFoldResult> sizes) {
llvm::SmallBitVector unusedDims(originalType.getRank());
@@ -1049,7 +1050,7 @@ computeMemRefRankReductionMask(MemRefType originalType, MemRefType reducedType,
llvm::SmallBitVector SubViewOp::getDroppedDims() {
MemRefType sourceType = getSourceType();
MemRefType resultType = getType();
llvm::Optional<llvm::SmallBitVector> unusedDims =
std::optional<llvm::SmallBitVector> unusedDims =
computeMemRefRankReductionMask(sourceType, resultType, getMixedSizes());
assert(unusedDims && "unable to find unused dims of subview");
return *unusedDims;
@@ -1364,7 +1365,7 @@ void ExtractAlignedPointerAsIndexOp::getAsmResultNames(
/// The number and type of the results are inferred from the
/// shape of the source.
LogicalResult ExtractStridedMetadataOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
ExtractStridedMetadataOpAdaptor extractAdaptor(operands, attributes, regions);
@@ -1625,7 +1626,7 @@ LogicalResult GlobalOp::verify() {
}
}

if (Optional<uint64_t> alignAttr = getAlignment()) {
if (std::optional<uint64_t> alignAttr = getAlignment()) {
uint64_t alignment = *alignAttr;

if (!llvm::isPowerOf2_64(alignment))
@@ -2610,7 +2611,7 @@ Type SubViewOp::inferRankReducedResultType(ArrayRef<int64_t> resultShape,
return inferredType;

// Compute which dimensions are dropped.
Optional<llvm::SmallDenseSet<unsigned>> dimsToProject =
std::optional<llvm::SmallDenseSet<unsigned>> dimsToProject =
computeRankReductionMask(inferredType.getShape(), resultShape);
assert(dimsToProject.has_value() && "invalid rank reduction");

@@ -2887,7 +2888,7 @@ static MemRefType getCanonicalSubViewResultType(
auto nonRankReducedType = SubViewOp::inferResultType(sourceType, mixedOffsets,
mixedSizes, mixedStrides)
.cast<MemRefType>();
llvm::Optional<llvm::SmallBitVector> unusedDims =
std::optional<llvm::SmallBitVector> unusedDims =
computeMemRefRankReductionMask(currentSourceType, currentResultType,
mixedSizes);
// Return nullptr as failure mode.
@@ -2970,22 +2971,22 @@ static bool isTrivialSubViewOp(SubViewOp subViewOp) {

// Check offsets are zero.
if (llvm::any_of(mixedOffsets, [](OpFoldResult ofr) {
Optional<int64_t> intValue = getConstantIntValue(ofr);
std::optional<int64_t> intValue = getConstantIntValue(ofr);
return !intValue || intValue.value() != 0;
}))
return false;

// Check strides are one.
if (llvm::any_of(mixedStrides, [](OpFoldResult ofr) {
Optional<int64_t> intValue = getConstantIntValue(ofr);
std::optional<int64_t> intValue = getConstantIntValue(ofr);
return !intValue || intValue.value() != 1;
}))
return false;

// Check all size values are static and matches the (static) source shape.
ArrayRef<int64_t> sourceShape = subViewOp.getSourceType().getShape();
for (const auto &size : llvm::enumerate(mixedSizes)) {
Optional<int64_t> intValue = getConstantIntValue(size.value());
std::optional<int64_t> intValue = getConstantIntValue(size.value());
if (!intValue || *intValue != sourceShape[size.index()])
return false;
}
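Note (not part of the patch): every hunk in this file follows the same mechanical pattern — the return or variable type is respelled from `llvm::Optional`/`Optional` to `std::optional`, and the call sites are untouched because the dereference and boolean-test idioms are identical. A minimal, self-contained sketch of that pattern; `getConstantIntValue` here is a stand-in with an invented signature, not the real MLIR helper:

```cpp
#include <cstdint>
#include <optional>

// Stand-in for a helper such as getConstantIntValue(); after the migration it
// returns std::optional instead of llvm::Optional.
std::optional<int64_t> getConstantIntValue(int64_t raw, bool isConstant) {
  if (!isConstant)
    return std::nullopt; // previously spelled llvm::None
  return raw;
}

// Call sites compile unchanged: operator bool, operator*, has_value() and
// value() exist on std::optional just as they did on llvm::Optional.
bool isUnitConstant(int64_t raw, bool isConstant) {
  std::optional<int64_t> val = getConstantIntValue(raw, isConstant);
  return val && *val == 1;
}
```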
9 changes: 4 additions & 5 deletions mlir/lib/Dialect/MemRef/Transforms/MultiBuffer.cpp
@@ -98,9 +98,9 @@ FailureOr<memref::AllocOp> mlir::memref::multiBuffer(memref::AllocOp allocOp,
}
if (!candidateLoop)
return failure();
llvm::Optional<Value> inductionVar = candidateLoop.getSingleInductionVar();
llvm::Optional<OpFoldResult> lowerBound = candidateLoop.getSingleLowerBound();
llvm::Optional<OpFoldResult> singleStep = candidateLoop.getSingleStep();
std::optional<Value> inductionVar = candidateLoop.getSingleInductionVar();
std::optional<OpFoldResult> lowerBound = candidateLoop.getSingleLowerBound();
std::optional<OpFoldResult> singleStep = candidateLoop.getSingleStep();
if (!inductionVar || !lowerBound || !singleStep)
return failure();

@@ -125,13 +125,12 @@ FailureOr<memref::AllocOp> mlir::memref::multiBuffer(memref::AllocOp allocOp,
AffineExpr induc = getAffineDimExpr(0, allocOp.getContext());
unsigned dimCount = 1;
auto getAffineExpr = [&](OpFoldResult e) -> AffineExpr {
if (Optional<int64_t> constValue = getConstantIntValue(e)) {
if (std::optional<int64_t> constValue = getConstantIntValue(e)) {
return getAffineConstantExpr(*constValue, allocOp.getContext());
}
auto value = getOrCreateValue(e, builder, candidateLoop->getLoc());
operands.push_back(value);
return getAffineDimExpr(dimCount++, allocOp.getContext());

};
auto init = getAffineExpr(*lowerBound);
auto step = getAffineExpr(*singleStep);
@@ -46,7 +46,7 @@ struct DimOfShapedTypeOpInterface : public OpRewritePattern<OpTy> {
if (!shapedTypeOp)
return failure();

Optional<int64_t> dimIndex = dimOp.getConstantIndex();
std::optional<int64_t> dimIndex = dimOp.getConstantIndex();
if (!dimIndex)
return failure();

@@ -88,7 +88,7 @@ struct DimOfReifyRankedShapedTypeOpInterface : public OpRewritePattern<OpTy> {
if (!rankedShapeTypeOp)
return failure();

Optional<int64_t> dimIndex = dimOp.getConstantIndex();
std::optional<int64_t> dimIndex = dimOp.getConstantIndex();
if (!dimIndex)
return failure();

2 changes: 1 addition & 1 deletion mlir/lib/Dialect/NVGPU/Transforms/OptimizeSharedMemory.cpp
@@ -149,7 +149,7 @@ getShmReadAndWriteOps(Operation *parentOp, Value shmMemRef,
MemoryEffectOpInterface iface = dyn_cast<MemoryEffectOpInterface>(op);
if (!iface)
return;
Optional<MemoryEffects::EffectInstance> effect =
std::optional<MemoryEffects::EffectInstance> effect =
iface.getEffectOnValue<MemoryEffects::Read>(shmMemRef);
if (effect) {
readOps.push_back(op);
20 changes: 10 additions & 10 deletions mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
@@ -117,7 +117,7 @@ static ParseResult parseClauseAttr(AsmParser &parser, ClauseAttr &attr) {
SMLoc loc = parser.getCurrentLocation();
if (parser.parseKeyword(&enumStr))
return failure();
if (Optional<ClauseT> enumValue = symbolizeEnum<ClauseT>(enumStr)) {
if (std::optional<ClauseT> enumValue = symbolizeEnum<ClauseT>(enumStr)) {
attr = ClauseAttr::get(parser.getContext(), *enumValue);
return success();
}
@@ -173,9 +173,9 @@ static void printLinearClause(OpAsmPrinter &p, Operation *op,
//===----------------------------------------------------------------------===//
// Parser, verifier and printer for Aligned Clause
//===----------------------------------------------------------------------===//
static LogicalResult verifyAlignedClause(Operation *op,
Optional<ArrayAttr> alignmentValues,
OperandRange alignedVariables) {
static LogicalResult
verifyAlignedClause(Operation *op, std::optional<ArrayAttr> alignmentValues,
OperandRange alignedVariables) {
// Check if number of alignment values equals to number of aligned variables
if (!alignedVariables.empty()) {
if (!alignmentValues || alignmentValues->size() != alignedVariables.size())
@@ -236,7 +236,7 @@ static ParseResult parseAlignedClause(
static void printAlignedClause(OpAsmPrinter &p, Operation *op,
ValueRange alignedVars,
TypeRange alignedVarTypes,
Optional<ArrayAttr> alignmentValues) {
std::optional<ArrayAttr> alignmentValues) {
for (unsigned i = 0; i < alignedVars.size(); ++i) {
if (i != 0)
p << ", ";
@@ -293,11 +293,11 @@ verifyScheduleModifiers(OpAsmParser &parser,
static ParseResult parseScheduleClause(
OpAsmParser &parser, ClauseScheduleKindAttr &scheduleAttr,
ScheduleModifierAttr &scheduleModifier, UnitAttr &simdModifier,
Optional<OpAsmParser::UnresolvedOperand> &chunkSize, Type &chunkType) {
std::optional<OpAsmParser::UnresolvedOperand> &chunkSize, Type &chunkType) {
StringRef keyword;
if (parser.parseKeyword(&keyword))
return failure();
llvm::Optional<mlir::omp::ClauseScheduleKind> schedule =
std::optional<mlir::omp::ClauseScheduleKind> schedule =
symbolizeClauseScheduleKind(keyword);
if (!schedule)
return parser.emitError(parser.getNameLoc()) << " expected schedule kind";
@@ -334,7 +334,7 @@ static ParseResult parseScheduleClause(

if (!modifiers.empty()) {
SMLoc loc = parser.getCurrentLocation();
if (Optional<ScheduleModifier> mod =
if (std::optional<ScheduleModifier> mod =
symbolizeScheduleModifier(modifiers[0])) {
scheduleModifier = ScheduleModifierAttr::get(parser.getContext(), *mod);
} else {
@@ -396,7 +396,7 @@ parseReductionVarList(OpAsmParser &parser,
static void printReductionVarList(OpAsmPrinter &p, Operation *op,
OperandRange reductionVars,
TypeRange reductionTypes,
Optional<ArrayAttr> reductions) {
std::optional<ArrayAttr> reductions) {
for (unsigned i = 0, e = reductions->size(); i < e; ++i) {
if (i != 0)
p << ", ";
@@ -407,7 +407,7 @@ static void printReductionVarList(OpAsmPrinter &p, Operation *op,

/// Verifies Reduction Clause
static LogicalResult verifyReductionVarList(Operation *op,
Optional<ArrayAttr> reductions,
std::optional<ArrayAttr> reductions,
OperandRange reductionVars) {
if (!reductionVars.empty()) {
if (!reductions || reductions->size() != reductionVars.size())
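Aside (illustration only): the OpenMP hunks above rely on the tablegen-generated `symbolizeEnum<...>`/`symbolizeClauseScheduleKind` helpers now returning `std::optional`. The sketch below mirrors that test-then-dereference shape with a hand-written stand-in enum and symbolizer, since the real declarations live in generated headers not shown here:

```cpp
#include <optional>
#include <string>

// Hand-written stand-in for a generated symbolize*() helper.
enum class ScheduleKind { Static, Dynamic };

std::optional<ScheduleKind> symbolizeScheduleKind(const std::string &keyword) {
  if (keyword == "static")
    return ScheduleKind::Static;
  if (keyword == "dynamic")
    return ScheduleKind::Dynamic;
  return std::nullopt;
}

// Same shape as parseScheduleClause above: symbolize, test, then dereference.
bool parseScheduleKind(const std::string &keyword, ScheduleKind &result) {
  if (std::optional<ScheduleKind> kind = symbolizeScheduleKind(keyword)) {
    result = *kind;
    return true;
  }
  return false;
}
```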
8 changes: 4 additions & 4 deletions mlir/lib/Dialect/PDL/IR/PDL.cpp
@@ -112,7 +112,7 @@ LogicalResult ApplyNativeRewriteOp::verify() {

LogicalResult AttributeOp::verify() {
Value attrType = getValueType();
Optional<Attribute> attrValue = getValue();
std::optional<Attribute> attrValue = getValue();

if (!attrValue) {
if (isa<RewriteOp>((*this)->getParentOp()))
@@ -203,7 +203,7 @@ static LogicalResult verifyResultTypesAreInferrable(OperationOp op,
if (resultTypes.empty()) {
// If we don't know the concrete operation, don't attempt any verification.
// We can't make assumptions if we don't know the concrete operation.
Optional<StringRef> rawOpName = op.getOpName();
std::optional<StringRef> rawOpName = op.getOpName();
if (!rawOpName)
return success();
Optional<RegisteredOperationName> opName =
@@ -290,15 +290,15 @@ LogicalResult OperationOp::verify() {
}

bool OperationOp::hasTypeInference() {
if (Optional<StringRef> rawOpName = getOpName()) {
if (std::optional<StringRef> rawOpName = getOpName()) {
OperationName opName(*rawOpName, getContext());
return opName.hasInterface<InferTypeOpInterface>();
}
return false;
}

bool OperationOp::mightHaveTypeInference() {
if (Optional<StringRef> rawOpName = getOpName()) {
if (std::optional<StringRef> rawOpName = getOpName()) {
OperationName opName(*rawOpName, getContext());
return opName.mightHaveInterface<InferTypeOpInterface>();
}
36 changes: 19 additions & 17 deletions mlir/lib/Dialect/SCF/IR/SCF.cpp
@@ -248,7 +248,7 @@ void ExecuteRegionOp::getCanonicalizationPatterns(RewritePatternSet &results,
/// correspond to a constant value for each operand, or null if that operand is
/// not a constant.
void ExecuteRegionOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
// If the predecessor is the ExecuteRegionOp, branch into the body.
if (!index) {
@@ -265,7 +265,7 @@ void ExecuteRegionOp::getSuccessorRegions(
//===----------------------------------------------------------------------===//

MutableOperandRange
ConditionOp::getMutableSuccessorOperands(Optional<unsigned> index) {
ConditionOp::getMutableSuccessorOperands(std::optional<unsigned> index) {
// Pass all operands except the condition to the successor region.
return getArgsMutable();
}
@@ -352,17 +352,19 @@ LogicalResult ForOp::verifyRegions() {
return success();
}

Optional<Value> ForOp::getSingleInductionVar() { return getInductionVar(); }
std::optional<Value> ForOp::getSingleInductionVar() {
return getInductionVar();
}

Optional<OpFoldResult> ForOp::getSingleLowerBound() {
std::optional<OpFoldResult> ForOp::getSingleLowerBound() {
return OpFoldResult(getLowerBound());
}

Optional<OpFoldResult> ForOp::getSingleStep() {
std::optional<OpFoldResult> ForOp::getSingleStep() {
return OpFoldResult(getStep());
}

Optional<OpFoldResult> ForOp::getSingleUpperBound() {
std::optional<OpFoldResult> ForOp::getSingleUpperBound() {
return OpFoldResult(getUpperBound());
}

@@ -476,7 +478,7 @@ ForOp mlir::scf::getForInductionVarOwner(Value val) {
/// correspond to the loop iterator operands, i.e., those excluding the
/// induction variable. LoopOp only has one region, so 0 is the only valid value
/// for `index`.
OperandRange ForOp::getSuccessorEntryOperands(Optional<unsigned> index) {
OperandRange ForOp::getSuccessorEntryOperands(std::optional<unsigned> index) {
assert(index && *index == 0 && "invalid region index");

// The initial operands map to the loop arguments after the induction
@@ -489,7 +491,7 @@ OperandRange ForOp::getSuccessorEntryOperands(Optional<unsigned> index) {
/// during the flow of control. `operands` is a set of optional attributes that
/// correspond to a constant value for each operand, or null if that operand is
/// not a constant.
void ForOp::getSuccessorRegions(Optional<unsigned> index,
void ForOp::getSuccessorRegions(std::optional<unsigned> index,
ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
// If the predecessor is the ForOp, branch into the body using the iterator
@@ -721,7 +723,7 @@ struct ForOpIterArgsFolder : public OpRewritePattern<scf::ForOp> {
/// Util function that tries to compute a constant diff between u and l.
/// Returns std::nullopt when the difference between two AffineValueMap is
/// dynamic.
static Optional<int64_t> computeConstDiff(Value l, Value u) {
static std::optional<int64_t> computeConstDiff(Value l, Value u) {
IntegerAttr clb, cub;
if (matchPattern(l, m_Constant(&clb)) && matchPattern(u, m_Constant(&cub))) {
llvm::APInt lbValue = clb.getValue();
@@ -754,7 +756,7 @@ struct SimplifyTrivialLoops : public OpRewritePattern<ForOp> {
return success();
}

Optional<int64_t> diff =
std::optional<int64_t> diff =
computeConstDiff(op.getLowerBound(), op.getUpperBound());
if (!diff)
return failure();
@@ -765,7 +767,7 @@ struct SimplifyTrivialLoops : public OpRewritePattern<ForOp> {
return success();
}

llvm::Optional<llvm::APInt> maybeStepValue = op.getConstantStep();
std::optional<llvm::APInt> maybeStepValue = op.getConstantStep();
if (!maybeStepValue)
return failure();

@@ -1068,7 +1070,7 @@ void ForOp::getCanonicalizationPatterns(RewritePatternSet &results,
LastTensorLoadCanonicalization, ForOpTensorCastFolder>(context);
}

Optional<APInt> ForOp::getConstantStep() {
std::optional<APInt> ForOp::getConstantStep() {
IntegerAttr step;
if (matchPattern(getStep(), m_Constant(&step)))
return step.getValue();
@@ -1212,7 +1214,7 @@ ParseResult ForeachThreadOp::parse(OpAsmParser &parser,
void ForeachThreadOp::build(mlir::OpBuilder &builder,
mlir::OperationState &result, ValueRange outputs,
ValueRange numThreads,
Optional<ArrayAttr> mapping) {
std::optional<ArrayAttr> mapping) {
result.addOperands(numThreads);
result.addOperands(outputs);
if (mapping.has_value()) {
@@ -1565,7 +1567,7 @@ void IfOp::print(OpAsmPrinter &p) {
/// during the flow of control. `operands` is a set of optional attributes that
/// correspond to a constant value for each operand, or null if that operand is
/// not a constant.
void IfOp::getSuccessorRegions(Optional<unsigned> index,
void IfOp::getSuccessorRegions(std::optional<unsigned> index,
ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
// The `then` and the `else` region branch back to the parent operation.
@@ -2723,7 +2725,7 @@ void WhileOp::build(::mlir::OpBuilder &odsBuilder,
afterBuilder(odsBuilder, odsState.location, afterBlock->getArguments());
}

OperandRange WhileOp::getSuccessorEntryOperands(Optional<unsigned> index) {
OperandRange WhileOp::getSuccessorEntryOperands(std::optional<unsigned> index) {
assert(index && *index == 0 &&
"WhileOp is expected to branch only to the first region");

@@ -2746,7 +2748,7 @@ Block::BlockArgListType WhileOp::getAfterArguments() {
return getAfter().front().getArguments();
}

void WhileOp::getSuccessorRegions(Optional<unsigned> index,
void WhileOp::getSuccessorRegions(std::optional<unsigned> index,
ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
// The parent op always branches to the condition region.
@@ -3524,7 +3526,7 @@ Block &scf::IndexSwitchOp::getCaseBlock(unsigned idx) {
}

void IndexSwitchOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &successors) {
// All regions branch back to the parent op.
if (index) {
@@ -453,8 +453,8 @@ static FailureOr<BaseMemRefType> computeLoopRegionIterArgBufferType(

/// Return `true` if the given loop may have 0 iterations.
bool mayHaveZeroIterations(scf::ForOp forOp) {
Optional<int64_t> lb = getConstantIntValue(forOp.getLowerBound());
Optional<int64_t> ub = getConstantIntValue(forOp.getUpperBound());
std::optional<int64_t> lb = getConstantIntValue(forOp.getLowerBound());
std::optional<int64_t> ub = getConstantIntValue(forOp.getUpperBound());
if (!lb.has_value() || !ub.has_value())
return true;
return *ub <= *lb;
@@ -1055,7 +1055,7 @@ struct YieldOpInterface
bool mayHaveZeroIterations(scf::ForeachThreadOp foreachThreadOp) {
int64_t p = 1;
for (Value v : foreachThreadOp.getNumThreads()) {
if (Optional<int64_t> c = getConstantIntValue(v)) {
if (std::optional<int64_t> c = getConstantIntValue(v)) {
p *= *c;
} else {
return true;
14 changes: 7 additions & 7 deletions mlir/lib/Dialect/SCF/Transforms/TileUsingInterface.cpp
@@ -66,13 +66,13 @@ fillInterchangeVector(ArrayRef<int64_t> interchangeVector,

// Check if `stride` evenly divides the trip count `size - offset`.
static bool tileDividesIterationDomain(Range loopRange) {
Optional<int64_t> offsetAsInt = getConstantIntValue(loopRange.offset);
std::optional<int64_t> offsetAsInt = getConstantIntValue(loopRange.offset);
if (!offsetAsInt)
return false;
Optional<int64_t> sizeAsInt = getConstantIntValue(loopRange.size);
std::optional<int64_t> sizeAsInt = getConstantIntValue(loopRange.size);
if (!sizeAsInt)
return false;
Optional<int64_t> strideAsInt = getConstantIntValue(loopRange.stride);
std::optional<int64_t> strideAsInt = getConstantIntValue(loopRange.stride);
if (!strideAsInt)
return false;
return ((sizeAsInt.value() - offsetAsInt.value()) % strideAsInt.value() == 0);
@@ -83,7 +83,7 @@ static bool tileDividesIterationDomain(Range loopRange) {
static OpFoldResult getBoundedTileSize(OpBuilder &b, Location loc,
Range loopRange, Value iv,
Value tileSize) {
Optional<int64_t> ts = getConstantIntValue(tileSize);
std::optional<int64_t> ts = getConstantIntValue(tileSize);
if (ts && ts.value() == 1)
return getAsOpFoldResult(tileSize);

@@ -484,10 +484,10 @@ mlir::scf::tileReductionUsingScf(PatternRewriter &b,
/// `iter_args` of the outer most that is encountered. Traversing the iter_args
/// indicates that this is a destination operand of the consumer. If there was
/// no loop traversal needed, the second value of the returned tuple is empty.
static std::tuple<OpResult, Optional<OpOperand *>>
static std::tuple<OpResult, std::optional<OpOperand *>>
getUntiledProducerFromSliceSource(OpOperand *source,
ArrayRef<scf::ForOp> loops) {
Optional<OpOperand *> destinationIterArg;
std::optional<OpOperand *> destinationIterArg;
auto loopIt = loops.rbegin();
while (auto iterArg = source->get().dyn_cast<BlockArgument>()) {
scf::ForOp loop = *loopIt;
@@ -633,7 +633,7 @@ mlir::scf::tileConsumerAndFuseProducerGreedilyUsingSCFForOp(
// TODO: This can be modeled better if the `DestinationStyleOpInterface`.
// Update to use that when it does become available.
scf::ForOp outerMostLoop = tileAndFuseResult.loops.front();
Optional<unsigned> iterArgNumber;
std::optional<unsigned> iterArgNumber;
if (destinationIterArg) {
iterArgNumber = outerMostLoop.getIterArgNumberForOpOperand(
*destinationIterArg.value());
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/SCF/Utils/AffineCanonicalizationUtils.cpp
@@ -218,8 +218,8 @@ addLoopRangeConstraints(FlatAffineValueConstraints &constraints, Value iv,
: constraints.appendSymbolVar(/*num=*/1);

// If loop lower/upper bounds are constant: Add EQ constraint.
Optional<int64_t> lbInt = getConstantIntValue(lb);
Optional<int64_t> ubInt = getConstantIntValue(ub);
std::optional<int64_t> lbInt = getConstantIntValue(lb);
std::optional<int64_t> ubInt = getConstantIntValue(ub);
if (lbInt)
constraints.addBound(IntegerPolyhedron::EQ, symLb, *lbInt);
if (ubInt)
36 changes: 18 additions & 18 deletions mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
@@ -142,15 +142,15 @@ std::string SPIRVDialect::getAttributeName(Decoration decoration) {

// Forward declarations.
template <typename ValTy>
static Optional<ValTy> parseAndVerify(SPIRVDialect const &dialect,
DialectAsmParser &parser);
static std::optional<ValTy> parseAndVerify(SPIRVDialect const &dialect,
DialectAsmParser &parser);
template <>
Optional<Type> parseAndVerify<Type>(SPIRVDialect const &dialect,
DialectAsmParser &parser);
std::optional<Type> parseAndVerify<Type>(SPIRVDialect const &dialect,
DialectAsmParser &parser);

template <>
Optional<unsigned> parseAndVerify<unsigned>(SPIRVDialect const &dialect,
DialectAsmParser &parser);
std::optional<unsigned> parseAndVerify<unsigned>(SPIRVDialect const &dialect,
DialectAsmParser &parser);

static Type parseAndVerifyType(SPIRVDialect const &dialect,
DialectAsmParser &parser) {
@@ -264,7 +264,7 @@ static LogicalResult parseOptionalArrayStride(const SPIRVDialect &dialect,
return failure();

SMLoc strideLoc = parser.getCurrentLocation();
Optional<unsigned> optStride = parseAndVerify<unsigned>(dialect, parser);
std::optional<unsigned> optStride = parseAndVerify<unsigned>(dialect, parser);
if (!optStride)
return failure();

@@ -474,8 +474,8 @@ static Type parseMatrixType(SPIRVDialect const &dialect,
// Specialize this function to parse each of the parameters that define an
// ImageType. By default it assumes this is an enum type.
template <typename ValTy>
static Optional<ValTy> parseAndVerify(SPIRVDialect const &dialect,
DialectAsmParser &parser) {
static std::optional<ValTy> parseAndVerify(SPIRVDialect const &dialect,
DialectAsmParser &parser) {
StringRef enumSpec;
SMLoc enumLoc = parser.getCurrentLocation();
if (parser.parseKeyword(&enumSpec)) {
@@ -489,8 +489,8 @@ static Optional<ValTy> parseAndVerify(SPIRVDialect const &dialect,
}

template <>
Optional<Type> parseAndVerify<Type>(SPIRVDialect const &dialect,
DialectAsmParser &parser) {
std::optional<Type> parseAndVerify<Type>(SPIRVDialect const &dialect,
DialectAsmParser &parser) {
// TODO: Further verify that the element type can be sampled
auto ty = parseAndVerifyType(dialect, parser);
if (!ty)
@@ -499,17 +499,17 @@ Optional<Type> parseAndVerify<Type>(SPIRVDialect const &dialect,
}

template <typename IntTy>
static Optional<IntTy> parseAndVerifyInteger(SPIRVDialect const &dialect,
DialectAsmParser &parser) {
static std::optional<IntTy> parseAndVerifyInteger(SPIRVDialect const &dialect,
DialectAsmParser &parser) {
IntTy offsetVal = std::numeric_limits<IntTy>::max();
if (parser.parseInteger(offsetVal))
return std::nullopt;
return offsetVal;
}

template <>
Optional<unsigned> parseAndVerify<unsigned>(SPIRVDialect const &dialect,
DialectAsmParser &parser) {
std::optional<unsigned> parseAndVerify<unsigned>(SPIRVDialect const &dialect,
DialectAsmParser &parser) {
return parseAndVerifyInteger<unsigned>(dialect, parser);
}

@@ -520,7 +520,7 @@ namespace {
// (termination condition) needs partial specialization.
template <typename ParseType, typename... Args>
struct ParseCommaSeparatedList {
Optional<std::tuple<ParseType, Args...>>
std::optional<std::tuple<ParseType, Args...>>
operator()(SPIRVDialect const &dialect, DialectAsmParser &parser) const {
auto parseVal = parseAndVerify<ParseType>(dialect, parser);
if (!parseVal)
@@ -541,8 +541,8 @@ struct ParseCommaSeparatedList {
// specs to parse the last element of the list.
template <typename ParseType>
struct ParseCommaSeparatedList<ParseType> {
Optional<std::tuple<ParseType>> operator()(SPIRVDialect const &dialect,
DialectAsmParser &parser) const {
std::optional<std::tuple<ParseType>>
operator()(SPIRVDialect const &dialect, DialectAsmParser &parser) const {
if (auto value = parseAndVerify<ParseType>(dialect, parser))
return std::tuple<ParseType>(*value);
return std::nullopt;
22 changes: 11 additions & 11 deletions mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
@@ -313,8 +313,8 @@ template <typename MemoryOpTy>
static void printMemoryAccessAttribute(
MemoryOpTy memoryOp, OpAsmPrinter &printer,
SmallVectorImpl<StringRef> &elidedAttrs,
Optional<spirv::MemoryAccess> memoryAccessAtrrValue = std::nullopt,
Optional<uint32_t> alignmentAttrValue = std::nullopt) {
std::optional<spirv::MemoryAccess> memoryAccessAtrrValue = std::nullopt,
std::optional<uint32_t> alignmentAttrValue = std::nullopt) {
// Print optional memory access attribute.
if (auto memAccess = (memoryAccessAtrrValue ? memoryAccessAtrrValue
: memoryOp.getMemoryAccess())) {
@@ -343,8 +343,8 @@ template <typename MemoryOpTy>
static void printSourceMemoryAccessAttribute(
MemoryOpTy memoryOp, OpAsmPrinter &printer,
SmallVectorImpl<StringRef> &elidedAttrs,
Optional<spirv::MemoryAccess> memoryAccessAtrrValue = std::nullopt,
Optional<uint32_t> alignmentAttrValue = std::nullopt) {
std::optional<spirv::MemoryAccess> memoryAccessAtrrValue = std::nullopt,
std::optional<uint32_t> alignmentAttrValue = std::nullopt) {

printer << ", ";

@@ -912,7 +912,7 @@ static ParseResult parseGroupNonUniformArithmeticOp(OpAsmParser &parser,
parser.parseOperand(valueInfo))
return failure();

Optional<OpAsmParser::UnresolvedOperand> clusterSizeInfo;
std::optional<OpAsmParser::UnresolvedOperand> clusterSizeInfo;
if (succeeded(parser.parseOptionalKeyword(kClusterSize))) {
clusterSizeInfo = OpAsmParser::UnresolvedOperand();
if (parser.parseLParen() || parser.parseOperand(*clusterSizeInfo) ||
@@ -3348,7 +3348,7 @@ LogicalResult spirv::MergeOp::verify() {
//===----------------------------------------------------------------------===//

void spirv::ModuleOp::build(OpBuilder &builder, OperationState &state,
Optional<StringRef> name) {
std::optional<StringRef> name) {
OpBuilder::InsertionGuard guard(builder);
builder.createBlock(state.addRegion());
if (name) {
@@ -3360,8 +3360,8 @@ void spirv::ModuleOp::build(OpBuilder &builder, OperationState &state,
void spirv::ModuleOp::build(OpBuilder &builder, OperationState &state,
spirv::AddressingModel addressingModel,
spirv::MemoryModel memoryModel,
Optional<VerCapExtAttr> vceTriple,
Optional<StringRef> name) {
std::optional<VerCapExtAttr> vceTriple,
std::optional<StringRef> name) {
state.addAttribute(
"addressing_model",
builder.getAttr<spirv::AddressingModelAttr>(addressingModel));
@@ -3414,7 +3414,7 @@ ParseResult spirv::ModuleOp::parse(OpAsmParser &parser,
}

void spirv::ModuleOp::print(OpAsmPrinter &printer) {
if (Optional<StringRef> name = getName()) {
if (std::optional<StringRef> name = getName()) {
printer << ' ';
printer.printSymbolName(*name);
}
@@ -3428,7 +3428,7 @@ void spirv::ModuleOp::print(OpAsmPrinter &printer) {
elidedAttrs.assign({addressingModelAttrName, memoryModelAttrName,
mlir::SymbolTable::getSymbolAttrName()});

if (Optional<spirv::VerCapExtAttr> triple = getVceTriple()) {
if (std::optional<spirv::VerCapExtAttr> triple = getVceTriple()) {
printer << " requires " << *triple;
elidedAttrs.push_back(spirv::ModuleOp::getVCETripleAttrName());
}
@@ -3806,7 +3806,7 @@ LogicalResult spirv::UnreachableOp::verify() {
ParseResult spirv::VariableOp::parse(OpAsmParser &parser,
OperationState &result) {
// Parse optional initializer
Optional<OpAsmParser::UnresolvedOperand> initInfo;
std::optional<OpAsmParser::UnresolvedOperand> initInfo;
if (succeeded(parser.parseOptionalKeyword("init"))) {
initInfo = OpAsmParser::UnresolvedOperand();
if (parser.parseLParen() || parser.parseOperand(*initInfo) ||
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
@@ -148,7 +148,7 @@ static LogicalResult lowerEntryPointABIAttr(spirv::FuncOp funcOp,

// Specifies the spirv.ExecutionModeOp.
if (DenseI32ArrayAttr workgroupSizeAttr = entryPointAttr.getWorkgroupSize()) {
Optional<ArrayRef<spirv::Capability>> caps =
std::optional<ArrayRef<spirv::Capability>> caps =
spirv::getCapabilities(spirv::ExecutionMode::LocalSize);
if (!caps || targetEnv.allows(*caps)) {
builder.create<spirv::ExecutionModeOp>(funcOp.getLoc(), funcOp,
@@ -161,7 +161,7 @@ static LogicalResult lowerEntryPointABIAttr(spirv::FuncOp funcOp,
}
}
if (Optional<int> subgroupSize = entryPointAttr.getSubgroupSize()) {
Optional<ArrayRef<spirv::Capability>> caps =
std::optional<ArrayRef<spirv::Capability>> caps =
spirv::getCapabilities(spirv::ExecutionMode::SubgroupSize);
if (!caps || targetEnv.allows(*caps)) {
builder.create<spirv::ExecutionModeOp>(funcOp.getLoc(), funcOp,
@@ -52,8 +52,8 @@ static AliasedResourceMap collectAliasedResources(spirv::ModuleOp moduleOp) {
AliasedResourceMap aliasedResources;
moduleOp->walk([&aliasedResources](spirv::GlobalVariableOp varOp) {
if (varOp->getAttrOfType<UnitAttr>("aliased")) {
Optional<uint32_t> set = varOp.getDescriptorSet();
Optional<uint32_t> binding = varOp.getBinding();
std::optional<uint32_t> set = varOp.getDescriptorSet();
std::optional<uint32_t> binding = varOp.getBinding();
if (set && binding)
aliasedResources[{*set, *binding}].push_back(varOp);
}
34 changes: 17 additions & 17 deletions mlir/lib/Dialect/Shape/IR/Shape.cpp
@@ -335,7 +335,7 @@ void AssumingOp::getCanonicalizationPatterns(RewritePatternSet &patterns,

// See RegionBranchOpInterface in Interfaces/ControlFlowInterfaces.td
void AssumingOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
// AssumingOp has unconditional control flow into the region and back to the
// parent, so return the correct RegionSuccessor purely based on the index
@@ -394,7 +394,7 @@ void AssumingOp::build(
//===----------------------------------------------------------------------===//

LogicalResult mlir::shape::AddOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
if (operands[0].getType().isa<SizeType>() ||
@@ -911,7 +911,7 @@ void ConstShapeOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
}

LogicalResult mlir::shape::ConstShapeOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
Builder b(context);
@@ -1068,7 +1068,7 @@ OpFoldResult CstrRequireOp::fold(ArrayRef<Attribute> operands) {
// DimOp
//===----------------------------------------------------------------------===//

Optional<int64_t> DimOp::getConstantIndex() {
std::optional<int64_t> DimOp::getConstantIndex() {
if (auto constSizeOp = getIndex().getDefiningOp<ConstSizeOp>())
return constSizeOp.getValue().getLimitedValue();
if (auto constantOp = getIndex().getDefiningOp<arith::ConstantOp>())
@@ -1081,7 +1081,7 @@ OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
auto valShapedType = valType.dyn_cast<ShapedType>();
if (!valShapedType || !valShapedType.hasRank())
return nullptr;
Optional<int64_t> index = getConstantIndex();
std::optional<int64_t> index = getConstantIndex();
if (!index.has_value())
return nullptr;
if (index.value() >= valShapedType.getRank())
@@ -1093,7 +1093,7 @@ OpFoldResult DimOp::fold(ArrayRef<Attribute> operands) {
}

LogicalResult mlir::shape::DimOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
DimOpAdaptor dimOp(operands);
@@ -1141,7 +1141,7 @@ OpFoldResult DivOp::fold(ArrayRef<Attribute> operands) {
}

LogicalResult mlir::shape::DivOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
if (operands[0].getType().isa<SizeType>() ||
@@ -1327,7 +1327,7 @@ void FuncOp::print(OpAsmPrinter &p) {
// GetExtentOp
//===----------------------------------------------------------------------===//

Optional<int64_t> GetExtentOp::getConstantDim() {
std::optional<int64_t> GetExtentOp::getConstantDim() {
if (auto constSizeOp = getDim().getDefiningOp<ConstSizeOp>())
return constSizeOp.getValue().getLimitedValue();
if (auto constantOp = getDim().getDefiningOp<arith::ConstantOp>())
@@ -1339,7 +1339,7 @@ OpFoldResult GetExtentOp::fold(ArrayRef<Attribute> operands) {
auto elements = operands[0].dyn_cast_or_null<DenseIntElementsAttr>();
if (!elements)
return nullptr;
Optional<int64_t> dim = getConstantDim();
std::optional<int64_t> dim = getConstantDim();
if (!dim.has_value())
return nullptr;
if (dim.value() >= elements.getNumElements())
@@ -1362,7 +1362,7 @@ void GetExtentOp::build(OpBuilder &builder, OperationState &result, Value shape,
}

LogicalResult mlir::shape::GetExtentOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
inferredReturnTypes.assign({IndexType::get(context)});
@@ -1400,7 +1400,7 @@ OpFoldResult IsBroadcastableOp::fold(ArrayRef<Attribute> operands) {
//===----------------------------------------------------------------------===//

LogicalResult mlir::shape::MeetOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
if (operands.empty())
@@ -1536,7 +1536,7 @@ void shape::RankOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
}

LogicalResult mlir::shape::RankOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
if (operands[0].getType().isa<ShapeType>())
@@ -1572,7 +1572,7 @@ OpFoldResult NumElementsOp::fold(ArrayRef<Attribute> operands) {
}

LogicalResult mlir::shape::NumElementsOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
if (operands[0].getType().isa<ShapeType>())
@@ -1604,7 +1604,7 @@ OpFoldResult MaxOp::fold(llvm::ArrayRef<mlir::Attribute> operands) {
}

LogicalResult mlir::shape::MaxOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
if (operands[0].getType() == operands[1].getType())
@@ -1636,7 +1636,7 @@ OpFoldResult MinOp::fold(llvm::ArrayRef<mlir::Attribute> operands) {
}

LogicalResult mlir::shape::MinOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
if (operands[0].getType() == operands[1].getType())
@@ -1673,7 +1673,7 @@ OpFoldResult MulOp::fold(ArrayRef<Attribute> operands) {
}

LogicalResult mlir::shape::MulOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
if (operands[0].getType().isa<SizeType>() ||
@@ -1760,7 +1760,7 @@ void ShapeOfOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
}

LogicalResult mlir::shape::ShapeOfOp::inferReturnTypes(
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
if (operands[0].getType().isa<ValueShapeType>())
15 changes: 7 additions & 8 deletions mlir/lib/Dialect/SparseTensor/IR/SparseTensorDialect.cpp
@@ -333,7 +333,7 @@ IntegerType StorageSpecifierType::getSizesType() const {
}

Type StorageSpecifierType::getFieldType(StorageSpecifierKind kind,
Optional<unsigned> dim) const {
std::optional<unsigned> dim) const {
if (kind != StorageSpecifierKind::ValMemSize)
assert(dim);

@@ -344,8 +344,8 @@ Type StorageSpecifierType::getFieldType(StorageSpecifierKind kind,
}

Type StorageSpecifierType::getFieldType(StorageSpecifierKind kind,
Optional<APInt> dim) const {
Optional<unsigned> intDim = std::nullopt;
std::optional<APInt> dim) const {
std::optional<unsigned> intDim = std::nullopt;
if (dim)
intDim = dim.value().getZExtValue();
return getFieldType(kind, intDim);
@@ -369,10 +369,9 @@ static LogicalResult isMatchingWidth(Value result, unsigned width) {
return failure();
}

static LogicalResult
verifySparsifierGetterSetter(StorageSpecifierKind mdKind, Optional<APInt> dim,
TypedValue<StorageSpecifierType> md,
Operation *op) {
static LogicalResult verifySparsifierGetterSetter(
StorageSpecifierKind mdKind, std::optional<APInt> dim,
TypedValue<StorageSpecifierType> md, Operation *op) {
if (mdKind == StorageSpecifierKind::ValMemSize && dim) {
return op->emitError(
"redundant dimension argument for querying value memory size");
@@ -482,7 +481,7 @@ static SetStorageSpecifierOp getSpecifierSetDef(SpecifierOp op) {

OpFoldResult GetStorageSpecifierOp::fold(ArrayRef<Attribute> operands) {
StorageSpecifierKind kind = getSpecifierKind();
Optional<APInt> dim = getDim();
std::optional<APInt> dim = getDim();
for (auto op = getSpecifierSetDef(*this); op; op = getSpecifierSetDef(op))
if (kind == op.getSpecifierKind() && dim == op.getDim())
return op.getValue();
@@ -133,9 +133,10 @@ static scf::ForOp createFor(OpBuilder &builder, Location loc, Value upper,
/// Gets the dimension size for the given sparse tensor at the given
/// original dimension 'dim'. Returns std::nullopt if no sparse encoding is
/// attached to the given tensor type.
static Optional<Value> sizeFromTensorAtDim(OpBuilder &builder, Location loc,
SparseTensorDescriptor desc,
unsigned dim) {
static std::optional<Value> sizeFromTensorAtDim(OpBuilder &builder,
Location loc,
SparseTensorDescriptor desc,
unsigned dim) {
RankedTensorType rtp = desc.getTensorType();
// Access into static dimension can query original type directly.
// Note that this is typically already done by DimOp's folding.
@@ -681,7 +682,7 @@ class SparseDimOpConverter : public OpConversionPattern<tensor::DimOp> {
LogicalResult
matchAndRewrite(tensor::DimOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
Optional<int64_t> index = op.getConstantIndex();
std::optional<int64_t> index = op.getConstantIndex();
if (!index || !getSparseTensorEncoding(adaptor.getSource().getType()))
return failure();

@@ -706,7 +706,7 @@ class SparseTensorToDimSizeConverter
if (!enc)
return failure();
// Only rewrite DimOp with constant index.
Optional<int64_t> dim = op.getConstantIndex();
std::optional<int64_t> dim = op.getConstantIndex();
if (!dim)
return failure();
// Generate the call.
36 changes: 19 additions & 17 deletions mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -380,7 +380,7 @@ void DimOp::build(OpBuilder &builder, OperationState &result, Value source,
build(builder, result, source, indexValue);
}

Optional<int64_t> DimOp::getConstantIndex() {
std::optional<int64_t> DimOp::getConstantIndex() {
return getConstantIntValue(getIndex());
}

@@ -400,7 +400,7 @@ Speculation::Speculatability DimOp::getSpeculatability() {

LogicalResult DimOp::verify() {
// Assume unknown index to be in range.
Optional<int64_t> index = getConstantIndex();
std::optional<int64_t> index = getConstantIndex();
if (!index)
return success();

@@ -598,7 +598,7 @@ struct ReplaceEmptyTensorStaticShapeDims : OpRewritePattern<EmptyOp> {
for (int64_t i = 0; i < op.getType().getRank(); ++i) {
if (op.getType().isDynamicDim(i)) {
Value dynamicSize = op.getDynamicSizes()[ctr++];
Optional<int64_t> cst = getConstantIntValue(dynamicSize);
std::optional<int64_t> cst = getConstantIntValue(dynamicSize);
if (cst.has_value()) {
staticShape[i] = *cst;
changedType = true;
@@ -626,7 +626,7 @@ struct FoldEmptyTensorWithDimOp : public OpRewritePattern<DimOp> {

LogicalResult matchAndRewrite(tensor::DimOp dimOp,
PatternRewriter &rewriter) const override {
Optional<int64_t> maybeConstantIndex = dimOp.getConstantIndex();
std::optional<int64_t> maybeConstantIndex = dimOp.getConstantIndex();
auto emptyTensorOp = dimOp.getSource().getDefiningOp<EmptyOp>();
if (!emptyTensorOp || !maybeConstantIndex)
return failure();
@@ -1445,7 +1445,7 @@ struct FoldDimOfExpandShape : public OpRewritePattern<DimOp> {
return failure();

// Only constant dimension values are supported.
Optional<int64_t> dim = dimOp.getConstantIndex();
std::optional<int64_t> dim = dimOp.getConstantIndex();
if (!dim.has_value())
return failure();

@@ -1489,7 +1489,7 @@ struct FoldDimOfCollapseShape : public OpRewritePattern<DimOp> {
return failure();

// Only constant dimension values are supported.
Optional<int64_t> dim = dimOp.getConstantIndex();
std::optional<int64_t> dim = dimOp.getConstantIndex();
if (!dim.has_value())
return failure();

@@ -1732,7 +1732,7 @@ llvm::SmallBitVector ExtractSliceOp::getDroppedDims() {
llvm::SmallBitVector droppedDims(mixedSizes.size());
unsigned shapePos = 0;
for (const auto &size : enumerate(mixedSizes)) {
Optional<int64_t> sizeVal = getConstantIntValue(size.value());
std::optional<int64_t> sizeVal = getConstantIntValue(size.value());
// If the size is not 1, or if the current matched dimension of the result
// is the same static shape as the size value (which is 1), then the
// dimension is preserved.
@@ -2278,15 +2278,16 @@ struct InsertSliceOpCastFolder final : public OpRewritePattern<InsertOpTy> {
}))
return failure();

auto getSourceOfCastOp = [](Value v) -> Optional<Value> {
auto getSourceOfCastOp = [](Value v) -> std::optional<Value> {
auto castOp = v.getDefiningOp<tensor::CastOp>();
if (!castOp || !canFoldIntoConsumerOp(castOp))
return std::nullopt;
return castOp.getSource();
};
Optional<Value> sourceCastSource =
std::optional<Value> sourceCastSource =
getSourceOfCastOp(insertSliceOp.getSource());
Optional<Value> destCastSource = getSourceOfCastOp(insertSliceOp.getDest());
std::optional<Value> destCastSource =
getSourceOfCastOp(insertSliceOp.getDest());
if (!sourceCastSource && !destCastSource)
return failure();

@@ -2352,7 +2353,7 @@ struct InsertSliceOpSourceCastInserter final
SmallVector<int64_t> newSrcShape(srcType.getShape().begin(),
srcType.getShape().end());
for (int64_t i = 0; i < srcType.getRank(); ++i) {
if (Optional<int64_t> constInt =
if (std::optional<int64_t> constInt =
getConstantIntValue(insertSliceOp.getMixedSizes()[i]))
newSrcShape[i] = *constInt;
}
@@ -2419,9 +2420,10 @@ void PadOp::getAsmResultNames(function_ref<void(Value, StringRef)> setNameFn) {
void printInferType(OpAsmPrinter &printer, Operation *op, Value optOperand,
Type typeToInfer, Type typeToInferFrom) {}

ParseResult parseInferType(OpAsmParser &parser,
Optional<OpAsmParser::UnresolvedOperand> optOperand,
Type &typeToInfer, Type typeToInferFrom) {
ParseResult
parseInferType(OpAsmParser &parser,
std::optional<OpAsmParser::UnresolvedOperand> optOperand,
Type &typeToInfer, Type typeToInferFrom) {
if (optOperand)
typeToInfer = typeToInferFrom;
return success();
@@ -3151,7 +3153,7 @@ static LogicalResult commonVerifierPackAndUnPackOp(OpTy packOrUnPack) {
llvm::zip(packedType.getShape().take_back(mixedTiles.size()),
mixedTiles),
[](std::tuple<int64_t, OpFoldResult> it) {
Optional<int64_t> constTileSize =
std::optional<int64_t> constTileSize =
getConstantIntValue(std::get<1>(it));
int64_t shape = std::get<0>(it);
if (!constTileSize) {
@@ -3232,7 +3234,7 @@ areNotFullTiles(ArrayRef<int64_t> inputShape,
auto it = dimAndTileMapping.find(dim);
if (it == dimAndTileMapping.end())
continue;
Optional<int64_t> constantTile = getConstantIntValue(it->second);
std::optional<int64_t> constantTile = getConstantIntValue(it->second);
if (!constantTile)
continue;
if (inputShape[dim] % (*constantTile) != 0)
@@ -3333,7 +3335,7 @@ bool areTilesAndTiledDimsAllConstant(OpTy op) {
SmallVector<OpFoldResult> mixedTiles = op.getMixedTiles();
for (auto [dimDest, tile] : llvm::zip(
packedType.getShape().take_back(mixedTiles.size()), mixedTiles)) {
Optional<int64_t> constTileSize = getConstantIntValue(tile);
std::optional<int64_t> constTileSize = getConstantIntValue(tile);
if (!constTileSize || ShapedType::isDynamic(dimDest))
return false;
}
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Tensor/IR/TensorTilingInterfaceImpl.cpp
@@ -265,7 +265,7 @@ static UnpackTileDimInfo getUnpackTileDimInfo(OpBuilder &b, UnPackOp unpackOp,
info.isAlignedToInnerTileSize = false;
FailureOr<int64_t> cstSize = linalg::getConstantUpperBoundForIndex(
getValueOrCreateConstantIndexOp(b, loc, tileSize));
Optional<int64_t> cstInnerSize = getConstantIntValue(innerTileSize);
std::optional<int64_t> cstInnerSize = getConstantIntValue(innerTileSize);
if (!failed(cstSize) && cstInnerSize) {
if (cstSize.value() % cstInnerSize.value() == 0)
info.isAlignedToInnerTileSize = true;
@@ -26,7 +26,7 @@ using namespace mlir;

/// Returns true if the the given `attrOrValue` is a constant zero.
static bool isZero(OpFoldResult attrOrValue) {
if (Optional<int64_t> val = getConstantIntValue(attrOrValue))
if (std::optional<int64_t> val = getConstantIntValue(attrOrValue))
return *val == 0;
return false;
}
62 changes: 30 additions & 32 deletions mlir/lib/Dialect/Tosa/IR/TosaOps.cpp
@@ -373,7 +373,7 @@ static LogicalResult resolveBroadcastShape(const ValueShapeRange &operands,
}

LogicalResult tosa::ArgMaxOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
ShapeAdaptor inputShape = operands.getShape(0);
@@ -398,7 +398,7 @@ LogicalResult tosa::ArgMaxOp::inferReturnTypeComponents(
}

LogicalResult tosa::ConcatOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
// Infer all dimension sizes by reducing based on inputs.
@@ -455,7 +455,7 @@ LogicalResult tosa::ConcatOp::inferReturnTypeComponents(
}

LogicalResult tosa::EqualOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
llvm::SmallVector<int64_t> outShape;
@@ -476,7 +476,7 @@ bool tosa::EqualOp::isCompatibleReturnTypes(TypeRange l, TypeRange r) {
}

LogicalResult tosa::FullyConnectedOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
ShapeAdaptor inputShape = operands.getShape(0);
@@ -496,9 +496,8 @@ LogicalResult tosa::FullyConnectedOp::inferReturnTypeComponents(
}

if (biasShape.hasRank()) {
outShape[1] = outShape[1] == ShapedType::kDynamic
? biasShape.getDimSize(0)
: outShape[1];
outShape[1] = outShape[1] == ShapedType::kDynamic ? biasShape.getDimSize(0)
: outShape[1];
}

inferredReturnShapes.push_back(ShapedTypeComponents(outShape));
@@ -508,7 +507,7 @@ LogicalResult tosa::FullyConnectedOp::inferReturnTypeComponents(
LogicalResult FullyConnectedOp::verify() { return verifyConvOp(*this); }

LogicalResult tosa::MatMulOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
ShapeAdaptor lhsShape = operands.getShape(0);
@@ -524,9 +523,8 @@ LogicalResult tosa::MatMulOp::inferReturnTypeComponents(
}

if (rhsShape.hasRank()) {
outShape[0] = outShape[0] == ShapedType::kDynamic
? rhsShape.getDimSize(0)
: outShape[0];
outShape[0] = outShape[0] == ShapedType::kDynamic ? rhsShape.getDimSize(0)
: outShape[0];
outShape[2] = rhsShape.getDimSize(2);
}

@@ -535,7 +533,7 @@ LogicalResult tosa::MatMulOp::inferReturnTypeComponents(
}

LogicalResult tosa::PadOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
ShapeAdaptor inputShape = operands.getShape(0);
@@ -597,7 +595,7 @@ static SmallVector<int64_t> convertToMlirShape(ArrayRef<int64_t> shape) {
}

LogicalResult tosa::SliceOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
ArrayAttr sizes = SliceOpAdaptor(operands, attributes).getSize();
@@ -607,13 +605,13 @@ LogicalResult tosa::SliceOp::inferReturnTypeComponents(
outputShape.push_back(val.cast<IntegerAttr>().getValue().getSExtValue());
}

inferredReturnShapes.push_back(ShapedTypeComponents(
convertToMlirShape(outputShape)));
inferredReturnShapes.push_back(
ShapedTypeComponents(convertToMlirShape(outputShape)));
return success();
}

LogicalResult tosa::TableOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
ShapeAdaptor inputShape = operands.getShape(0);
@@ -629,7 +627,7 @@ LogicalResult tosa::TableOp::inferReturnTypeComponents(
}

LogicalResult tosa::TileOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
TileOpAdaptor adaptor(operands, attributes);
@@ -663,7 +661,7 @@ LogicalResult tosa::TileOp::inferReturnTypeComponents(
}

LogicalResult tosa::ReshapeOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
ReshapeOpAdaptor adaptor(operands, attributes);
@@ -703,7 +701,7 @@ LogicalResult tosa::ReshapeOp::inferReturnTypeComponents(
}

LogicalResult tosa::TransposeOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
ShapeAdaptor inputShape = operands.getShape(0);
@@ -770,7 +768,7 @@ LogicalResult tosa::TransposeOp::inferReturnTypeComponents(
}

LogicalResult tosa::GatherOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
llvm::SmallVector<int64_t> outputShape;
@@ -795,7 +793,7 @@ LogicalResult tosa::GatherOp::inferReturnTypeComponents(
}

LogicalResult tosa::ResizeOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
ResizeOpAdaptor adaptor(operands, attributes);
@@ -838,7 +836,7 @@ LogicalResult tosa::ResizeOp::inferReturnTypeComponents(
}

LogicalResult tosa::ScatterOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
llvm::SmallVector<int64_t> outputShape;
@@ -887,7 +885,7 @@ static LogicalResult ReduceInferReturnTypes(

#define REDUCE_SHAPE_INFER(OP) \
LogicalResult OP::inferReturnTypeComponents( \
MLIRContext *context, ::llvm::Optional<Location> location, \
MLIRContext *context, ::std::optional<Location> location, \
ValueShapeRange operands, DictionaryAttr attributes, \
RegionRange regions, \
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) { \
@@ -918,7 +916,7 @@ static LogicalResult NAryInferReturnTypes(

#define NARY_SHAPE_INFER(OP) \
LogicalResult OP::inferReturnTypeComponents( \
MLIRContext *context, ::llvm::Optional<Location> location, \
MLIRContext *context, ::std::optional<Location> location, \
ValueShapeRange operands, DictionaryAttr attributes, \
RegionRange regions, \
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) { \
@@ -1007,7 +1005,7 @@ static LogicalResult poolingInferReturnTypes(
}

LogicalResult Conv2DOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamic);
@@ -1074,7 +1072,7 @@ LogicalResult Conv2DOp::inferReturnTypeComponents(
LogicalResult Conv2DOp::verify() { return verifyConvOp(*this); }

LogicalResult Conv3DOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
llvm::SmallVector<int64_t> outputShape(5, ShapedType::kDynamic);
@@ -1151,21 +1149,21 @@ LogicalResult Conv3DOp::inferReturnTypeComponents(
LogicalResult Conv3DOp::verify() { return verifyConvOp(*this); }

LogicalResult AvgPool2dOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
return poolingInferReturnTypes(operands, attributes, inferredReturnShapes);
}

LogicalResult MaxPool2dOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
return poolingInferReturnTypes(operands, attributes, inferredReturnShapes);
}

LogicalResult DepthwiseConv2DOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
llvm::SmallVector<int64_t> outputShape(4, ShapedType::kDynamic);
@@ -1245,7 +1243,7 @@ LogicalResult DepthwiseConv2DOp::inferReturnTypeComponents(
LogicalResult DepthwiseConv2DOp::verify() { return verifyConvOp(*this); }

LogicalResult TransposeConv2DOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
TransposeConv2DOp::Adaptor adaptor(operands.getValues(), attributes);
@@ -1313,7 +1311,7 @@ LogicalResult TransposeConv2DOp::inferReturnTypeComponents(
}

LogicalResult IfOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
llvm::SmallVector<tosa::YieldOp> yieldOps;
@@ -1357,7 +1355,7 @@ LogicalResult IfOp::inferReturnTypeComponents(
}

LogicalResult WhileOp::inferReturnTypeComponents(
MLIRContext *context, ::llvm::Optional<Location> location,
MLIRContext *context, ::std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
llvm::SmallVector<tosa::YieldOp> yieldOps;
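Note: every hunk above makes the same mechanical change to the TOSA shape-inference hooks: the ::llvm::Optional<Location> parameter becomes ::std::optional<Location>, with no behavioral change. As a minimal sketch of what an implementation of the updated hook looks like after this patch (HypotheticalOp and its pass-through shape logic are the editor's illustration, not code from this diff; assumes the usual MLIR/TOSA headers):

LogicalResult HypotheticalOp::inferReturnTypeComponents(
    MLIRContext *context, ::std::optional<Location> location,
    ValueShapeRange operands, DictionaryAttr attributes, RegionRange regions,
    SmallVectorImpl<ShapedTypeComponents> &inferredReturnShapes) {
  // Forward the first operand's shape; unranked inputs produce an unranked
  // result component.
  ShapeAdaptor inputShape = operands.getShape(0);
  if (!inputShape.hasRank()) {
    inferredReturnShapes.push_back(ShapedTypeComponents());
    return success();
  }
  SmallVector<int64_t> outShape;
  inputShape.getDims(outShape);
  inferredReturnShapes.push_back(ShapedTypeComponents(outShape));
  return success();
}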
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Tosa/Transforms/TosaValidation.cpp
@@ -46,7 +46,7 @@ struct TosaValidation : public tosa::impl::TosaValidationBase<TosaValidation> {
private:
void runOnOperation() override;

llvm::Optional<TosaProfileEnum> profileType;
std::optional<TosaProfileEnum> profileType;
};

void TosaValidation::runOnOperation() {
16 changes: 8 additions & 8 deletions mlir/lib/Dialect/Transform/IR/TransformOps.cpp
@@ -117,16 +117,16 @@ LogicalResult PatternApplicatorExtension::findAllMatches(
// AlternativesOp
//===----------------------------------------------------------------------===//

OperandRange
transform::AlternativesOp::getSuccessorEntryOperands(Optional<unsigned> index) {
OperandRange transform::AlternativesOp::getSuccessorEntryOperands(
std::optional<unsigned> index) {
if (index && getOperation()->getNumOperands() == 1)
return getOperation()->getOperands();
return OperandRange(getOperation()->operand_end(),
getOperation()->operand_end());
}

void transform::AlternativesOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
for (Region &alternative : llvm::drop_begin(
getAlternatives(), index.has_value() ? *index + 1 : 0)) {
@@ -338,7 +338,7 @@ void transform::ForeachOp::getEffects(
}

void transform::ForeachOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
Region *bodyRegion = &getBody();
if (!index) {
@@ -353,7 +353,7 @@ void transform::ForeachOp::getSuccessorRegions(
}

OperandRange
transform::ForeachOp::getSuccessorEntryOperands(Optional<unsigned> index) {
transform::ForeachOp::getSuccessorEntryOperands(std::optional<unsigned> index) {
// The iteration variable op handle is mapped to a subset (one op to be
// precise) of the payload ops of the ForeachOp operand.
assert(index && *index == 0 && "unexpected region index");
@@ -737,8 +737,8 @@ void transform::SequenceOp::getEffects(
}
}

OperandRange
transform::SequenceOp::getSuccessorEntryOperands(Optional<unsigned> index) {
OperandRange transform::SequenceOp::getSuccessorEntryOperands(
std::optional<unsigned> index) {
assert(index && *index == 0 && "unexpected region index");
if (getOperation()->getNumOperands() == 1)
return getOperation()->getOperands();
@@ -747,7 +747,7 @@ transform::SequenceOp::getSuccessorEntryOperands(Optional<unsigned> index) {
}

void transform::SequenceOp::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
if (!index) {
Region *bodyRegion = &getBody();
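Note: the same std::optional<unsigned> signature change applies to any downstream op implementing RegionBranchOpInterface. A minimal sketch of the convention (HypotheticalLoopOp, its getBody()/getResults() accessors, and the successor choice are the editor's illustration, not part of this diff): a disengaged index means control flow enters from the parent op.

void HypotheticalLoopOp::getSuccessorRegions(
    std::optional<unsigned> index, ArrayRef<Attribute> operands,
    SmallVectorImpl<RegionSuccessor> &regions) {
  // No index: the parent op branches into the body region.
  if (!index) {
    regions.push_back(RegionSuccessor(&getBody()));
    return;
  }
  // From the body, control either loops back or exits to the op's results.
  regions.push_back(RegionSuccessor(&getBody()));
  regions.push_back(RegionSuccessor(getResults()));
}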
9 changes: 4 additions & 5 deletions mlir/lib/Dialect/Utils/ReshapeOpsUtils.cpp
@@ -57,8 +57,7 @@ mlir::getReassociationIndicesForCollapse(ArrayRef<int64_t> sourceShape,
// dimensions should also be dynamic and product of all previous unprocessed
// dimensions of the expanded shape should be 1.
if (sourceShape[sourceDim] == ShapedType::kDynamic &&
(currTargetShape != ShapedType::kDynamic ||
prodOfCollapsedDims != 1))
(currTargetShape != ShapedType::kDynamic || prodOfCollapsedDims != 1))
return std::nullopt;

// If the collapsed dim is dynamic, the current expanded dim should also
@@ -229,7 +228,7 @@ LogicalResult mlir::reshapeLikeShapesAreCompatible(
ArrayRef<ReassociationIndices> reassociationMaps, bool isExpandingReshape) {
unsigned expandedDimStart = 0;
for (const auto &map : llvm::enumerate(reassociationMaps)) {
Optional<int64_t> dynamicShape;
std::optional<int64_t> dynamicShape;
int64_t linearizedStaticShape = 1;
for (const auto &dim : llvm::enumerate(
expandedShape.slice(expandedDimStart, map.value().size()))) {
@@ -279,8 +278,8 @@
llvm::SmallBitVector mask(sliceInputShape.size());
unsigned idx = 0;
for (const auto &[offset, size, stride] : sliceParams) {
Optional<int64_t> offsetConst = getConstantIntValue(offset);
Optional<int64_t> strideConst = getConstantIntValue(stride);
std::optional<int64_t> offsetConst = getConstantIntValue(offset);
std::optional<int64_t> strideConst = getConstantIntValue(stride);
mask[idx] = !isEqualConstantIntOrValue(size, sliceInputShape[idx]) ||
(!strideConst || *strideConst != 1) ||
(!offsetConst || *offsetConst != 0);
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Utils/StaticValueUtils.cpp
@@ -91,7 +91,7 @@ SmallVector<OpFoldResult> getAsOpFoldResult(ArrayAttr arrayAttr) {
}

/// If ofr is a constant integer or an IntegerAttr, return the integer.
Optional<int64_t> getConstantIntValue(OpFoldResult ofr) {
std::optional<int64_t> getConstantIntValue(OpFoldResult ofr) {
// Case 1: Check for Constant integer.
if (auto val = ofr.dyn_cast<Value>()) {
APSInt intVal;
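Note: with getConstantIntValue now returning std::optional<int64_t>, call sites use the standard optional API instead of llvm::Optional's. The mapping is one-to-one; a self-contained plain-C++17 sketch of the pattern (the stand-in function below is the editor's illustration, not MLIR code):

#include <cstdint>
#include <iostream>
#include <optional>

// Stand-in for getConstantIntValue: yields a value only when folding succeeds.
static std::optional<int64_t> getConstantIntValueStandIn(bool isConstant) {
  if (!isConstant)
    return std::nullopt; // previously llvm::None
  return 42;
}

int main() {
  std::optional<int64_t> c = getConstantIntValueStandIn(true);
  if (c.has_value())                  // previously hasValue()
    std::cout << *c << "\n";          // operator* is unchanged
  std::cout << c.value_or(0) << "\n"; // value_or behaves the same
  return 0;
}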
31 changes: 16 additions & 15 deletions mlir/lib/Dialect/Vector/IR/VectorOps.cpp
@@ -315,7 +315,8 @@ OpFoldResult MultiDimReductionOp::fold(ArrayRef<Attribute> operands) {
return {};
}

Optional<SmallVector<int64_t, 4>> MultiDimReductionOp::getShapeForUnroll() {
std::optional<SmallVector<int64_t, 4>>
MultiDimReductionOp::getShapeForUnroll() {
return llvm::to_vector<4>(getSourceVectorType().getShape());
}

@@ -500,7 +501,7 @@ Value mlir::vector::getVectorReductionOp(arith::AtomicRMWKind op,
return nullptr;
}

Optional<SmallVector<int64_t, 4>> ReductionOp::getShapeForUnroll() {
std::optional<SmallVector<int64_t, 4>> ReductionOp::getShapeForUnroll() {
return llvm::to_vector<4>(getVectorType().getShape());
}

@@ -939,7 +940,7 @@ std::vector<std::pair<int64_t, int64_t>> ContractionOp::getBatchDimMap() {
getContext());
}

Optional<SmallVector<int64_t, 4>> ContractionOp::getShapeForUnroll() {
std::optional<SmallVector<int64_t, 4>> ContractionOp::getShapeForUnroll() {
SmallVector<int64_t, 4> shape;
getIterationBounds(shape);
return shape;
@@ -1077,7 +1078,7 @@ void vector::ExtractOp::build(OpBuilder &builder, OperationState &result,
}

LogicalResult
ExtractOp::inferReturnTypes(MLIRContext *, Optional<Location>,
ExtractOp::inferReturnTypes(MLIRContext *, std::optional<Location>,
ValueRange operands, DictionaryAttr attributes,
RegionRange,
SmallVectorImpl<Type> &inferredReturnTypes) {
@@ -1721,7 +1722,7 @@ static void populateFromInt64AttrArray(ArrayAttr arrayAttr,
// FmaOp
//===----------------------------------------------------------------------===//

Optional<SmallVector<int64_t, 4>> FMAOp::getShapeForUnroll() {
std::optional<SmallVector<int64_t, 4>> FMAOp::getShapeForUnroll() {
return llvm::to_vector<4>(getVectorType().getShape());
}

@@ -2001,7 +2002,7 @@ LogicalResult ShuffleOp::verify() {
}

LogicalResult
ShuffleOp::inferReturnTypes(MLIRContext *, Optional<Location>,
ShuffleOp::inferReturnTypes(MLIRContext *, std::optional<Location>,
ValueRange operands, DictionaryAttr attributes,
RegionRange,
SmallVectorImpl<Type> &inferredReturnTypes) {
@@ -3178,7 +3179,7 @@ void TransferReadOp::build(OpBuilder &builder, OperationState &result,
void TransferReadOp::build(OpBuilder &builder, OperationState &result,
VectorType vectorType, Value source,
ValueRange indices, AffineMap permutationMap,
Optional<ArrayRef<bool>> inBounds) {
std::optional<ArrayRef<bool>> inBounds) {
auto permutationMapAttr = AffineMapAttr::get(permutationMap);
auto inBoundsAttr = (inBounds && !inBounds.value().empty())
? builder.getBoolArrayAttr(inBounds.value())
@@ -3191,7 +3192,7 @@ void TransferReadOp::build(OpBuilder &builder, OperationState &result,
void TransferReadOp::build(OpBuilder &builder, OperationState &result,
VectorType vectorType, Value source,
ValueRange indices, Value padding,
Optional<ArrayRef<bool>> inBounds) {
std::optional<ArrayRef<bool>> inBounds) {
AffineMap permutationMap = getTransferMinorIdentityMap(
source.getType().cast<ShapedType>(), vectorType);
auto permutationMapAttr = AffineMapAttr::get(permutationMap);
@@ -3208,7 +3209,7 @@ void TransferReadOp::build(OpBuilder &builder, OperationState &result,
void TransferReadOp::build(OpBuilder &builder, OperationState &result,
VectorType vectorType, Value source,
ValueRange indices,
Optional<ArrayRef<bool>> inBounds) {
std::optional<ArrayRef<bool>> inBounds) {
Type elemType = source.getType().cast<ShapedType>().getElementType();
Value padding = builder.create<arith::ConstantOp>(
result.location, elemType, builder.getZeroAttr(elemType));
@@ -3573,7 +3574,7 @@ OpFoldResult TransferReadOp::fold(ArrayRef<Attribute>) {
return OpFoldResult();
}

Optional<SmallVector<int64_t, 4>> TransferReadOp::getShapeForUnroll() {
std::optional<SmallVector<int64_t, 4>> TransferReadOp::getShapeForUnroll() {
return llvm::to_vector<4>(getVectorType().getShape());
}

@@ -3800,7 +3801,7 @@ void TransferWriteOp::build(OpBuilder &builder, OperationState &result,
void TransferWriteOp::build(OpBuilder &builder, OperationState &result,
Value vector, Value dest, ValueRange indices,
AffineMap permutationMap,
Optional<ArrayRef<bool>> inBounds) {
std::optional<ArrayRef<bool>> inBounds) {
auto permutationMapAttr = AffineMapAttr::get(permutationMap);
auto inBoundsAttr = (inBounds && !inBounds.value().empty())
? builder.getBoolArrayAttr(inBounds.value())
@@ -3813,7 +3814,7 @@ void TransferWriteOp::build(OpBuilder &builder, OperationState &result,
/// map to 'getMinorIdentityMap'.
void TransferWriteOp::build(OpBuilder &builder, OperationState &result,
Value vector, Value dest, ValueRange indices,
Optional<ArrayRef<bool>> inBounds) {
std::optional<ArrayRef<bool>> inBounds) {
auto vectorType = vector.getType().cast<VectorType>();
AffineMap permutationMap = getTransferMinorIdentityMap(
dest.getType().cast<ShapedType>(), vectorType);
@@ -4046,7 +4047,7 @@ LogicalResult TransferWriteOp::fold(ArrayRef<Attribute> operands,
return memref::foldMemRefCast(*this);
}

Optional<SmallVector<int64_t, 4>> TransferWriteOp::getShapeForUnroll() {
std::optional<SmallVector<int64_t, 4>> TransferWriteOp::getShapeForUnroll() {
return llvm::to_vector<4>(getVectorType().getShape());
}

@@ -5037,7 +5038,7 @@ LogicalResult vector::TransposeOp::verify() {
return success();
}

Optional<SmallVector<int64_t, 4>> TransposeOp::getShapeForUnroll() {
std::optional<SmallVector<int64_t, 4>> TransposeOp::getShapeForUnroll() {
return llvm::to_vector<4>(getResultType().getShape());
}

@@ -5580,7 +5581,7 @@ ParseResult WarpExecuteOnLane0Op::parse(OpAsmParser &parser,
}

void WarpExecuteOnLane0Op::getSuccessorRegions(
Optional<unsigned> index, ArrayRef<Attribute> operands,
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
if (index) {
regions.push_back(RegionSuccessor(getResults()));
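Note: the unroll hooks above now return std::optional<SmallVector<int64_t, 4>>. A short sketch of the consuming side (the helper function and its copy-out logic are the editor's illustration, not part of this diff; assumes the MLIR vector dialect headers):

static LogicalResult getUnrollShape(vector::ContractionOp op,
                                    SmallVectorImpl<int64_t> &shape) {
  std::optional<SmallVector<int64_t, 4>> maybeShape = op.getShapeForUnroll();
  // A disengaged optional means the op cannot provide an unroll shape.
  if (!maybeShape.has_value())
    return failure();
  shape.assign(maybeShape->begin(), maybeShape->end());
  return success();
}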
45 changes: 23 additions & 22 deletions mlir/lib/Dialect/Vector/Transforms/VectorTransforms.cpp
@@ -44,7 +44,7 @@ using namespace mlir;
using namespace mlir::vector;

// Helper to find an index in an affine map.
static Optional<int64_t> getResultIndex(AffineMap map, int64_t index) {
static std::optional<int64_t> getResultIndex(AffineMap map, int64_t index) {
for (int64_t i = 0, e = map.getNumResults(); i < e; ++i) {
int64_t idx = map.getDimPosition(i);
if (idx == index)
@@ -147,11 +147,11 @@ static SmallVector<IntType> extractVector(ArrayAttr arrayAttr) {
}

/// Helper to create arithmetic operation associated with a kind of contraction.
static Optional<Value> createContractArithOp(Location loc, Value x, Value y,
Value acc,
vector::CombiningKind kind,
PatternRewriter &rewriter,
bool isInt) {
static std::optional<Value> createContractArithOp(Location loc, Value x,
Value y, Value acc,
vector::CombiningKind kind,
PatternRewriter &rewriter,
bool isInt) {
using vector::CombiningKind;
Value mul;
if (isInt) {
@@ -169,12 +169,13 @@ static Optional<Value> createContractArithOp(Location loc, Value x, Value y,
return std::nullopt;
// Special case for fused multiply-add.
if (acc && acc.getType().isa<VectorType>() && kind == CombiningKind::ADD) {
return Optional<Value>(rewriter.create<vector::FMAOp>(loc, x, y, acc));
return std::optional<Value>(
rewriter.create<vector::FMAOp>(loc, x, y, acc));
}
mul = rewriter.create<arith::MulFOp>(loc, x, y);
}
if (!acc)
return Optional<Value>(mul);
return std::optional<Value>(mul);
return makeArithReduction(rewriter, loc, kind, mul, acc);
}

@@ -191,7 +192,7 @@

/// Look for a given dimension in an affine map and return its position. Return
/// std::nullopt if the dimension is not in the map results.
static llvm::Optional<unsigned> getDimPosition(AffineMap map, unsigned dim) {
static std::optional<unsigned> getDimPosition(AffineMap map, unsigned dim) {
for (unsigned i = 0, e = map.getNumResults(); i < e; i++) {
if (map.getDimPosition(i) == dim)
return i;
@@ -552,8 +553,8 @@ class OuterProductOpLowering : public OpRewritePattern<vector::OuterProductOp> {
if (!rhsType) {
// Special case: AXPY operation.
Value b = rewriter.create<vector::BroadcastOp>(loc, lhsType, op.getRhs());
Optional<Value> mult = createContractArithOp(loc, op.getLhs(), b, acc,
kind, rewriter, isInt);
std::optional<Value> mult = createContractArithOp(
loc, op.getLhs(), b, acc, kind, rewriter, isInt);
if (!mult.has_value())
return failure();
rewriter.replaceOp(op, mult.value());
@@ -570,7 +571,7 @@ class OuterProductOpLowering : public OpRewritePattern<vector::OuterProductOp> {
Value r = nullptr;
if (acc)
r = rewriter.create<vector::ExtractOp>(loc, rhsType, acc, pos);
Optional<Value> m =
std::optional<Value> m =
createContractArithOp(loc, a, op.getRhs(), r, kind, rewriter, isInt);
if (!m.has_value())
return failure();
@@ -645,7 +646,7 @@ struct ContractOpToElementwise
// Loop through the parallel dimensions to calculate the dimensions to
// broadcast and to permute in order to extract only parallel dimensions.
for (unsigned i = 0; i < numParallelDims; i++) {
llvm::Optional<unsigned> lhsDim =
std::optional<unsigned> lhsDim =
getDimPosition(lhsMap, accMap.getDimPosition(i));
if (lhsDim) {
lhsTranspose.push_back(numLhsDimToBroadcast + *lhsDim);
@@ -655,7 +656,7 @@ struct ContractOpToElementwise
contractOp.getResultType().cast<VectorType>().getDimSize(i));
lhsTranspose.push_back(lhsDims.size() - 1);
}
llvm::Optional<unsigned> rhsDim =
std::optional<unsigned> rhsDim =
getDimPosition(rhsMap, accMap.getDimPosition(i));
if (rhsDim) {
rhsTranspose.push_back(numRhsDimToBroadcast + *rhsDim);
@@ -690,7 +691,7 @@ struct ContractOpToElementwise
loc, newLhs, rewriter.getI64ArrayAttr(lhsOffsets));
newRhs = rewriter.create<vector::ExtractOp>(
loc, newRhs, rewriter.getI64ArrayAttr(rhsOffsets));
Optional<Value> result =
std::optional<Value> result =
createContractArithOp(loc, newLhs, newRhs, contractOp.getAcc(),
contractOp.getKind(), rewriter, isInt);
rewriter.replaceOp(contractOp, {*result});
@@ -2010,8 +2011,8 @@ ContractionOpLowering::lowerReduction(vector::ContractionOp op,
// Use iterator index 0.
int64_t iterIndex = 0;
SmallVector<AffineMap> iMap = op.getIndexingMapsArray();
Optional<int64_t> lookupLhs = getResultIndex(iMap[0], iterIndex);
Optional<int64_t> lookupRhs = getResultIndex(iMap[1], iterIndex);
std::optional<int64_t> lookupLhs = getResultIndex(iMap[0], iterIndex);
std::optional<int64_t> lookupRhs = getResultIndex(iMap[1], iterIndex);
if (!lookupLhs.has_value())
return rewriter.notifyMatchFailure(op, [&](Diagnostic &diag) {
diag << "expected iterIndex=" << iterIndex << "to map to a LHS dimension";
@@ -2075,7 +2076,7 @@ ContractionOpLowering::lowerReduction(vector::ContractionOp op,
struct TransferReadToVectorLoadLowering
: public OpRewritePattern<vector::TransferReadOp> {
TransferReadToVectorLoadLowering(MLIRContext *context,
llvm::Optional<unsigned> maxRank,
std::optional<unsigned> maxRank,
PatternBenefit benefit = 1)
: OpRewritePattern<vector::TransferReadOp>(context, benefit),
maxTransferRank(maxRank) {}
@@ -2151,7 +2152,7 @@ struct TransferReadToVectorLoadLowering
return success();
}

llvm::Optional<unsigned> maxTransferRank;
std::optional<unsigned> maxTransferRank;
};

/// Replace a 0-d vector.load with a memref.load + vector.broadcast.
@@ -2217,7 +2218,7 @@ struct VectorStoreToMemrefStoreLowering
struct TransferWriteToVectorStoreLowering
: public OpRewritePattern<vector::TransferWriteOp> {
TransferWriteToVectorStoreLowering(MLIRContext *context,
llvm::Optional<unsigned> maxRank,
std::optional<unsigned> maxRank,
PatternBenefit benefit = 1)
: OpRewritePattern<vector::TransferWriteOp>(context, benefit),
maxTransferRank(maxRank) {}
@@ -2280,7 +2281,7 @@ struct TransferWriteToVectorStoreLowering
return success();
}

llvm::Optional<unsigned> maxTransferRank;
std::optional<unsigned> maxTransferRank;
};

// Returns the values in `arrayAttr` as an integer vector.
@@ -3026,7 +3027,7 @@ void mlir::vector::
}

void mlir::vector::populateVectorTransferLoweringPatterns(
RewritePatternSet &patterns, llvm::Optional<unsigned> maxTransferRank,
RewritePatternSet &patterns, std::optional<unsigned> maxTransferRank,
PatternBenefit benefit) {
patterns.add<TransferReadToVectorLoadLowering,
TransferWriteToVectorStoreLowering>(patterns.getContext(),
4 changes: 2 additions & 2 deletions mlir/lib/IR/BuiltinDialect.cpp
@@ -126,7 +126,7 @@ void BuiltinDialect::initialize() {
//===----------------------------------------------------------------------===//

void ModuleOp::build(OpBuilder &builder, OperationState &state,
Optional<StringRef> name) {
std::optional<StringRef> name) {
state.addRegion()->emplaceBlock();
if (name) {
state.attributes.push_back(builder.getNamedAttr(
@@ -135,7 +135,7 @@ void ModuleOp::build(OpBuilder &builder, OperationState &state,
}

/// Construct a module from the given context.
ModuleOp ModuleOp::create(Location loc, Optional<StringRef> name) {
ModuleOp ModuleOp::create(Location loc, std::optional<StringRef> name) {
OpBuilder builder(loc->getContext());
return builder.create<ModuleOp>(loc, name);
}
8 changes: 4 additions & 4 deletions mlir/lib/IR/BuiltinTypes.cpp
@@ -246,7 +246,7 @@ VectorType VectorType::scaleElementBitwidth(unsigned scale) {
return VectorType();
}

VectorType VectorType::cloneWith(Optional<ArrayRef<int64_t>> shape,
VectorType VectorType::cloneWith(std::optional<ArrayRef<int64_t>> shape,
Type elementType) const {
return VectorType::get(shape.value_or(getShape()), elementType,
getNumScalableDims());
@@ -268,7 +268,7 @@ ArrayRef<int64_t> TensorType::getShape() const {
return cast<RankedTensorType>().getShape();
}

TensorType TensorType::cloneWith(Optional<ArrayRef<int64_t>> shape,
TensorType TensorType::cloneWith(std::optional<ArrayRef<int64_t>> shape,
Type elementType) const {
if (auto unrankedTy = dyn_cast<UnrankedTensorType>()) {
if (shape)
@@ -346,7 +346,7 @@ ArrayRef<int64_t> BaseMemRefType::getShape() const {
return cast<MemRefType>().getShape();
}

BaseMemRefType BaseMemRefType::cloneWith(Optional<ArrayRef<int64_t>> shape,
BaseMemRefType BaseMemRefType::cloneWith(std::optional<ArrayRef<int64_t>> shape,
Type elementType) const {
if (auto unrankedTy = dyn_cast<UnrankedMemRefType>()) {
if (!shape)
@@ -387,7 +387,7 @@ unsigned BaseMemRefType::getMemorySpaceAsInt() const {
/// which dimensions must be kept when e.g. compute MemRef strides under
/// rank-reducing operations. Return std::nullopt if reducedShape cannot be
/// obtained by dropping only `1` entries in `originalShape`.
llvm::Optional<llvm::SmallDenseSet<unsigned>>
std::optional<llvm::SmallDenseSet<unsigned>>
mlir::computeRankReductionMask(ArrayRef<int64_t> originalShape,
ArrayRef<int64_t> reducedShape) {
size_t originalRank = originalShape.size(), reducedRank = reducedShape.size();
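Note: cloneWith now takes std::optional<ArrayRef<int64_t>>; a disengaged optional keeps the original shape, mirroring the shape.value_or(getShape()) call above. A usage sketch (the wrapper function and the concrete shapes are the editor's illustration, not part of this diff):

static void cloneWithSketch(MLIRContext *ctx) {
  VectorType f32Vec = VectorType::get({4, 8}, Float32Type::get(ctx));
  // std::nullopt keeps the 4x8 shape and only swaps the element type.
  VectorType i32Vec = f32Vec.cloneWith(std::nullopt, IntegerType::get(ctx, 32));
  // An engaged optional overrides the shape as before.
  VectorType flat = f32Vec.cloneWith(ArrayRef<int64_t>{32}, Float32Type::get(ctx));
  (void)i32Vec;
  (void)flat;
}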
2 changes: 1 addition & 1 deletion mlir/lib/IR/Dialect.cpp
@@ -75,7 +75,7 @@ Type Dialect::parseType(DialectAsmParser &parser) const {
return Type();
}

Optional<Dialect::ParseOpHook>
std::optional<Dialect::ParseOpHook>
Dialect::getParseOperationHook(StringRef opName) const {
return std::nullopt;
}
36 changes: 19 additions & 17 deletions mlir/lib/Interfaces/ControlFlowInterfaces.cpp
@@ -36,7 +36,7 @@ SuccessorOperands::SuccessorOperands(unsigned int producedOperandCount,
/// Returns the `BlockArgument` corresponding to operand `operandIndex` in some
/// successor if 'operandIndex' is within the range of 'operands', or
/// std::nullopt if `operandIndex` isn't a successor operand index.
Optional<BlockArgument>
std::optional<BlockArgument>
detail::getBranchSuccessorArgument(const SuccessorOperands &operands,
unsigned operandIndex, Block *successor) {
OperandRange forwardedOperands = operands.getForwardedOperands();
@@ -90,17 +90,17 @@ detail::verifyBranchSuccessorOperands(Operation *op, unsigned succNo,
/// inputs that flow from `sourceIndex' to the given region, or std::nullopt if
/// the exact type match verification is not necessary (e.g., if the Op verifies
/// the match itself).
static LogicalResult
verifyTypesAlongAllEdges(Operation *op, Optional<unsigned> sourceNo,
function_ref<Optional<TypeRange>(Optional<unsigned>)>
getInputsTypesForRegion) {
static LogicalResult verifyTypesAlongAllEdges(
Operation *op, std::optional<unsigned> sourceNo,
function_ref<std::optional<TypeRange>(std::optional<unsigned>)>
getInputsTypesForRegion) {
auto regionInterface = cast<RegionBranchOpInterface>(op);

SmallVector<RegionSuccessor, 2> successors;
regionInterface.getSuccessorRegions(sourceNo, successors);

for (RegionSuccessor &succ : successors) {
Optional<unsigned> succRegionNo;
std::optional<unsigned> succRegionNo;
if (!succ.isParent())
succRegionNo = succ.getSuccessor()->getRegionNumber();

@@ -119,7 +119,8 @@ verifyTypesAlongAllEdges(Operation *op, Optional<unsigned> sourceNo,
return diag;
};

Optional<TypeRange> sourceTypes = getInputsTypesForRegion(succRegionNo);
std::optional<TypeRange> sourceTypes =
getInputsTypesForRegion(succRegionNo);
if (!sourceTypes.has_value())
continue;

@@ -151,7 +152,8 @@ verifyTypesAlongAllEdges(Operation *op, Optional<unsigned> sourceNo,
LogicalResult detail::verifyTypesAlongControlFlowEdges(Operation *op) {
auto regionInterface = cast<RegionBranchOpInterface>(op);

auto inputTypesFromParent = [&](Optional<unsigned> regionNo) -> TypeRange {
auto inputTypesFromParent =
[&](std::optional<unsigned> regionNo) -> TypeRange {
return regionInterface.getSuccessorEntryOperands(regionNo).getTypes();
};

@@ -179,7 +181,7 @@ LogicalResult detail::verifyTypesAlongControlFlowEdges(Operation *op) {
// implementing the `RegionBranchTerminatorOpInterface`, all should have the
// same operand types when passing them to the same region.

Optional<OperandRange> regionReturnOperands;
std::optional<OperandRange> regionReturnOperands;
for (Block &block : region) {
Operation *terminator = block.getTerminator();
auto terminatorOperands =
@@ -202,7 +204,7 @@ LogicalResult detail::verifyTypesAlongControlFlowEdges(Operation *op) {
}

auto inputTypesFromRegion =
[&](Optional<unsigned> regionNo) -> Optional<TypeRange> {
[&](std::optional<unsigned> regionNo) -> std::optional<TypeRange> {
// If there is no return-like terminator, the op itself should verify
// type consistency.
if (!regionReturnOperands)
@@ -307,7 +309,7 @@ bool RegionBranchOpInterface::isRepetitiveRegion(unsigned index) {
}

void RegionBranchOpInterface::getSuccessorRegions(
Optional<unsigned> index, SmallVectorImpl<RegionSuccessor> &regions) {
std::optional<unsigned> index, SmallVectorImpl<RegionSuccessor> &regions) {
unsigned numInputs = 0;
if (index) {
// If the predecessor is a region, get the number of operands from an
@@ -367,9 +369,9 @@ bool mlir::isRegionReturnLike(Operation *operation) {
/// `OperandRange` represents all operands that are passed to the specified
/// successor region. If `regionIndex` is `std::nullopt`, all operands that are
/// passed to the parent operation will be returned.
Optional<MutableOperandRange>
mlir::getMutableRegionBranchSuccessorOperands(Operation *operation,
Optional<unsigned> regionIndex) {
std::optional<MutableOperandRange>
mlir::getMutableRegionBranchSuccessorOperands(
Operation *operation, std::optional<unsigned> regionIndex) {
// Try to query a RegionBranchTerminatorOpInterface to determine
// all successor operands that will be passed to the successor
// input arguments.
@@ -388,9 +390,9 @@ mlir::getMutableRegionBranchSuccessorOperands(Operation *operation,
/// Returns the read only operands that are passed to the region with the given
/// `regionIndex`. See `getMutableRegionBranchSuccessorOperands` for more
/// information.
Optional<OperandRange>
std::optional<OperandRange>
mlir::getRegionBranchSuccessorOperands(Operation *operation,
Optional<unsigned> regionIndex) {
std::optional<unsigned> regionIndex) {
auto range = getMutableRegionBranchSuccessorOperands(operation, regionIndex);
return range ? Optional<OperandRange>(*range) : std::nullopt;
return range ? std::optional<OperandRange>(*range) : std::nullopt;
}
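Note: callers of the helper above now receive std::optional<OperandRange>. A short consuming sketch (the counting helper is the editor's illustration, not part of this diff; passing std::nullopt asks for the operands forwarded back to the parent op, as documented above):

static size_t countOperandsForwardedToParent(Operation *terminator) {
  std::optional<OperandRange> forwarded =
      getRegionBranchSuccessorOperands(terminator, /*regionIndex=*/std::nullopt);
  return forwarded ? forwarded->size() : 0;
}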
11 changes: 6 additions & 5 deletions mlir/lib/Interfaces/InferTypeOpInterface.cpp
@@ -174,12 +174,13 @@ ShapeAdaptor ValueShapeRange::getShape(int index) const {
}

LogicalResult mlir::detail::inferReturnTensorTypes(
function_ref<LogicalResult(
MLIRContext *, Optional<Location> location, ValueShapeRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &retComponents)>
function_ref<
LogicalResult(MLIRContext *, std::optional<Location> location,
ValueShapeRange operands, DictionaryAttr attributes,
RegionRange regions,
SmallVectorImpl<ShapedTypeComponents> &retComponents)>
componentTypeFn,
MLIRContext *context, Optional<Location> location, ValueRange operands,
MLIRContext *context, std::optional<Location> location, ValueRange operands,
DictionaryAttr attributes, RegionRange regions,
SmallVectorImpl<Type> &inferredReturnTypes) {
SmallVector<ShapedTypeComponents, 2> retComponents;
6 changes: 3 additions & 3 deletions mlir/lib/Rewrite/ByteCode.cpp
@@ -46,7 +46,7 @@ PDLByteCodePattern PDLByteCodePattern::create(pdl_interp::RecordMatchOp matchOp,
llvm::to_vector<8>(generatedOpsAttr.getAsValueRange<StringAttr>());

// Check to see if this is pattern matches a specific operation type.
if (Optional<StringRef> rootKind = matchOp.getRootKind())
if (std::optional<StringRef> rootKind = matchOp.getRootKind())
return PDLByteCodePattern(rewriterAddr, configSet, *rootKind, benefit, ctx,
generatedOps);
return PDLByteCodePattern(rewriterAddr, configSet, MatchAnyOpTypeTag(),
@@ -940,7 +940,7 @@ void Generator::generate(pdl_interp::GetOperandOp op, ByteCodeWriter &writer) {
}
void Generator::generate(pdl_interp::GetOperandsOp op, ByteCodeWriter &writer) {
Value result = op.getValue();
Optional<uint32_t> index = op.getIndex();
std::optional<uint32_t> index = op.getIndex();
writer.append(OpCode::GetOperands,
index.value_or(std::numeric_limits<uint32_t>::max()),
op.getInputOp());
@@ -960,7 +960,7 @@ void Generator::generate(pdl_interp::GetResultOp op, ByteCodeWriter &writer) {
}
void Generator::generate(pdl_interp::GetResultsOp op, ByteCodeWriter &writer) {
Value result = op.getValue();
Optional<uint32_t> index = op.getIndex();
std::optional<uint32_t> index = op.getIndex();
writer.append(OpCode::GetResults,
index.value_or(std::numeric_limits<uint32_t>::max()),
op.getInputOp());
6 changes: 3 additions & 3 deletions mlir/lib/Target/LLVMIR/DebugImporter.cpp
@@ -42,7 +42,7 @@ DIBasicTypeAttr DebugImporter::translateImpl(llvm::DIBasicType *node) {
}

DICompileUnitAttr DebugImporter::translateImpl(llvm::DICompileUnit *node) {
Optional<DIEmissionKind> emissionKind =
std::optional<DIEmissionKind> emissionKind =
symbolizeDIEmissionKind(node->getEmissionKind());
return DICompileUnitAttr::get(context, node->getSourceLanguage(),
translate(node->getFile()),
@@ -51,7 +51,7 @@ DICompileUnitAttr DebugImporter::translateImpl(llvm::DICompileUnit *node) {
}

DICompositeTypeAttr DebugImporter::translateImpl(llvm::DICompositeType *node) {
Optional<DIFlags> flags = symbolizeDIFlags(node->getFlags());
std::optional<DIFlags> flags = symbolizeDIFlags(node->getFlags());
SmallVector<DINodeAttr> elements;
for (llvm::DINode *element : node->getElements()) {
assert(element && "expected a non-null element type");
@@ -102,7 +102,7 @@ DIScopeAttr DebugImporter::translateImpl(llvm::DIScope *node) {
}

DISubprogramAttr DebugImporter::translateImpl(llvm::DISubprogram *node) {
Optional<DISubprogramFlags> subprogramFlags =
std::optional<DISubprogramFlags> subprogramFlags =
symbolizeDISubprogramFlags(node->getSubprogram()->getSPFlags());
return DISubprogramAttr::get(
context, translate(node->getUnit()), translate(node->getScope()),
@@ -27,7 +27,7 @@ using namespace mlir;

namespace {
static llvm::omp::ScheduleKind
convertToScheduleKind(Optional<omp::ClauseScheduleKind> schedKind) {
convertToScheduleKind(std::optional<omp::ClauseScheduleKind> schedKind) {
if (!schedKind.has_value())
return llvm::omp::OMP_SCHEDULE_Default;
switch (schedKind.value()) {
@@ -398,7 +398,7 @@ static omp::ReductionDeclareOp findReductionDecl(omp::WsLoopOp container,
static void
collectReductionDecls(omp::WsLoopOp loop,
SmallVectorImpl<omp::ReductionDeclareOp> &reductions) {
Optional<ArrayAttr> attr = loop.getReductions();
std::optional<ArrayAttr> attr = loop.getReductions();
if (!attr)
return;

@@ -855,7 +855,8 @@ convertOmpWsLoop(Operation &opInst, llvm::IRBuilderBase &builder,

// TODO: Handle doacross loops when the ordered clause has a parameter.
bool isOrdered = loop.getOrderedVal().has_value();
Optional<omp::ScheduleModifier> scheduleModifier = loop.getScheduleModifier();
std::optional<omp::ScheduleModifier> scheduleModifier =
loop.getScheduleModifier();
bool isSimd = loop.getSimdModifier();

ompBuilder->applyWorkshareLoop(
@@ -989,11 +990,11 @@ convertOmpSimdLoop(Operation &opInst, llvm::IRBuilderBase &builder,
ompBuilder->collapseLoops(ompLoc.DL, loopInfos, {});

llvm::ConstantInt *simdlen = nullptr;
if (llvm::Optional<uint64_t> simdlenVar = loop.getSimdlen())
if (std::optional<uint64_t> simdlenVar = loop.getSimdlen())
simdlen = builder.getInt64(simdlenVar.value());

llvm::ConstantInt *safelen = nullptr;
if (llvm::Optional<uint64_t> safelenVar = loop.getSafelen())
if (std::optional<uint64_t> safelenVar = loop.getSafelen())
safelen = builder.getInt64(safelenVar.value());

llvm::MapVector<llvm::Value *, llvm::Value *> alignedVars;
@@ -1009,7 +1010,7 @@

/// Convert an Atomic Ordering attribute to llvm::AtomicOrdering.
llvm::AtomicOrdering
convertAtomicOrdering(Optional<omp::ClauseMemoryOrderKind> ao) {
convertAtomicOrdering(std::optional<omp::ClauseMemoryOrderKind> ao) {
if (!ao)
return llvm::AtomicOrdering::Monotonic; // Default Memory Ordering

8 changes: 4 additions & 4 deletions mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -687,7 +687,7 @@ LogicalResult ModuleTranslation::convertGlobals() {

addRuntimePreemptionSpecifier(op.getDsoLocal(), var);

Optional<uint64_t> alignment = op.getAlignment();
std::optional<uint64_t> alignment = op.getAlignment();
if (alignment.has_value())
var->setAlignment(llvm::MaybeAlign(alignment.value()));

@@ -783,7 +783,7 @@ static LogicalResult checkedAddLLVMFnAttribute(Location loc,
/// attribute and the second string beings its value. Note that even integer
/// attributes are expected to have their values expressed as strings.
static LogicalResult
forwardPassthroughAttributes(Location loc, Optional<ArrayAttr> attributes,
forwardPassthroughAttributes(Location loc, std::optional<ArrayAttr> attributes,
llvm::Function *llvmFunc) {
if (!attributes)
return success();
@@ -1111,7 +1111,7 @@ LogicalResult ModuleTranslation::createAliasScopeMetadata() {
llvm::LLVMContext &ctx = llvmModule->getContext();
llvm::SmallVector<llvm::Metadata *, 2> operands;
operands.push_back({}); // Placeholder for self-reference
if (Optional<StringRef> description = op.getDescription())
if (std::optional<StringRef> description = op.getDescription())
operands.push_back(llvm::MDString::get(ctx, *description));
llvm::MDNode *domain = llvm::MDNode::get(ctx, operands);
domain->replaceOperandWith(0, domain); // Self-reference for uniqueness
@@ -1130,7 +1130,7 @@ LogicalResult ModuleTranslation::createAliasScopeMetadata() {
llvm::SmallVector<llvm::Metadata *, 3> operands;
operands.push_back({}); // Placeholder for self-reference
operands.push_back(domain);
if (Optional<StringRef> description = op.getDescription())
if (std::optional<StringRef> description = op.getDescription())
operands.push_back(llvm::MDString::get(ctx, *description));
llvm::MDNode *scope = llvm::MDNode::get(ctx, operands);
scope->replaceOperandWith(0, scope); // Self-reference for uniqueness
2 changes: 1 addition & 1 deletion mlir/lib/Tools/PDLL/CodeGen/CPPGen.cpp
@@ -79,7 +79,7 @@ void CodeGen::generate(const ast::Module &astModule, ModuleOp module) {
int patternIndex = 0;
for (pdl::PatternOp pattern : module.getOps<pdl::PatternOp>()) {
// If the pattern has a name, use that. Otherwise, generate a unique name.
if (Optional<StringRef> patternName = pattern.getSymName()) {
if (std::optional<StringRef> patternName = pattern.getSymName()) {
patternNames.insert(patternName->str());
} else {
std::string name;