56 changes: 29 additions & 27 deletions mlir/lib/Analysis/Presburger/Simplex.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -272,7 +272,7 @@ LogicalResult LexSimplexBase::addCut(unsigned row) {
return moveRowUnknownToColumn(cutRow);
}

Optional<unsigned> LexSimplex::maybeGetNonIntegralVarRow() const {
std::optional<unsigned> LexSimplex::maybeGetNonIntegralVarRow() const {
for (const Unknown &u : var) {
if (u.orientation == Orientation::Column)
continue;
Expand All @@ -292,7 +292,7 @@ MaybeOptimum<SmallVector<MPInt, 8>> LexSimplex::findIntegerLexMin() {
return OptimumKind::Empty;

// Then, if the sample value is integral, we are done.
while (Optional<unsigned> maybeRow = maybeGetNonIntegralVarRow()) {
while (std::optional<unsigned> maybeRow = maybeGetNonIntegralVarRow()) {
// Otherwise, for the variable whose row has a non-integral sample value,
// we add a cut, a constraint that remove this rational point
// while preserving all integer points, thus keeping the lexmin the same.
Expand Down Expand Up @@ -478,7 +478,7 @@ void SymbolicLexSimplex::recordOutput(SymbolicLexMin &result) const {
MultiAffineFunction(funcSpace, output, domainPoly.getLocalReprs())});
}

Optional<unsigned> SymbolicLexSimplex::maybeGetAlwaysViolatedRow() {
std::optional<unsigned> SymbolicLexSimplex::maybeGetAlwaysViolatedRow() {
// First look for rows that are clearly violated just from the big M
// coefficient, without needing to perform any simplex queries on the domain.
for (unsigned row = 0, e = getNumRows(); row < e; ++row)
Expand All @@ -496,7 +496,7 @@ Optional<unsigned> SymbolicLexSimplex::maybeGetAlwaysViolatedRow() {
return {};
}

Optional<unsigned> SymbolicLexSimplex::maybeGetNonIntegralVarRow() {
std::optional<unsigned> SymbolicLexSimplex::maybeGetNonIntegralVarRow() {
for (const Unknown &u : var) {
if (u.orientation == Orientation::Column)
continue;
Expand All @@ -510,7 +510,7 @@ Optional<unsigned> SymbolicLexSimplex::maybeGetNonIntegralVarRow() {
/// The non-branching pivots are just the ones moving the rows
/// that are always violated in the symbol domain.
LogicalResult SymbolicLexSimplex::doNonBranchingPivots() {
while (Optional<unsigned> row = maybeGetAlwaysViolatedRow())
while (std::optional<unsigned> row = maybeGetAlwaysViolatedRow())
if (moveRowUnknownToColumn(*row).failed())
return failure();
return success();
Expand Down Expand Up @@ -612,7 +612,7 @@ SymbolicLexMin SymbolicLexSimplex::computeSymbolicIntegerLexMin() {

// The tableau is rationally consistent for the current domain.
// Now we look for non-integral sample values and add cuts for them.
if (Optional<unsigned> row = maybeGetNonIntegralVarRow()) {
if (std::optional<unsigned> row = maybeGetNonIntegralVarRow()) {
if (addSymbolicCut(*row).failed()) {
// No integral points; return.
--level;
Expand Down Expand Up @@ -675,7 +675,7 @@ bool LexSimplex::rowIsViolated(unsigned row) const {
return false;
}

Optional<unsigned> LexSimplex::maybeGetViolatedRow() const {
std::optional<unsigned> LexSimplex::maybeGetViolatedRow() const {
for (unsigned row = 0, e = getNumRows(); row < e; ++row)
if (rowIsViolated(row))
return row;
Expand All @@ -688,7 +688,7 @@ Optional<unsigned> LexSimplex::maybeGetViolatedRow() const {
LogicalResult LexSimplex::restoreRationalConsistency() {
if (empty)
return failure();
while (Optional<unsigned> maybeViolatedRow = maybeGetViolatedRow())
while (std::optional<unsigned> maybeViolatedRow = maybeGetViolatedRow())
if (moveRowUnknownToColumn(*maybeViolatedRow).failed())
return failure();
return success();
Expand Down Expand Up @@ -865,8 +865,8 @@ unsigned LexSimplexBase::getLexMinPivotColumn(unsigned row, unsigned colA,
///
/// If multiple columns are valid, we break ties by considering a lexicographic
/// ordering where we prefer unknowns with lower index.
Optional<SimplexBase::Pivot> Simplex::findPivot(int row,
Direction direction) const {
std::optional<SimplexBase::Pivot>
Simplex::findPivot(int row, Direction direction) const {
std::optional<unsigned> col;
for (unsigned j = 2, e = getNumColumns(); j < e; ++j) {
MPInt elem = tableau(row, j);
Expand All @@ -885,7 +885,7 @@ Optional<SimplexBase::Pivot> Simplex::findPivot(int row,

Direction newDirection =
tableau(row, *col) < 0 ? flippedDirection(direction) : direction;
Optional<unsigned> maybePivotRow = findPivotRow(row, newDirection, *col);
std::optional<unsigned> maybePivotRow = findPivotRow(row, newDirection, *col);
return Pivot{maybePivotRow.value_or(row), *col};
}

Expand Down Expand Up @@ -977,7 +977,7 @@ LogicalResult Simplex::restoreRow(Unknown &u) {
"unknown should be in row position");

while (tableau(u.pos, 1) < 0) {
Optional<Pivot> maybePivot = findPivot(u.pos, Direction::Up);
std::optional<Pivot> maybePivot = findPivot(u.pos, Direction::Up);
if (!maybePivot)
break;

Expand Down Expand Up @@ -1010,10 +1010,10 @@ LogicalResult Simplex::restoreRow(Unknown &u) {
/// 0 and hence saturates the bound it imposes. We break ties between rows that
/// impose the same bound by considering a lexicographic ordering where we
/// prefer unknowns with lower index value.
Optional<unsigned> Simplex::findPivotRow(Optional<unsigned> skipRow,
Direction direction,
unsigned col) const {
Optional<unsigned> retRow;
std::optional<unsigned> Simplex::findPivotRow(std::optional<unsigned> skipRow,
Direction direction,
unsigned col) const {
std::optional<unsigned> retRow;
// Initialize these to zero in order to silence a warning about retElem and
// retConst being used uninitialized in the initialization of `diff` below. In
// reality, these are always initialized when that line is reached since these
Expand Down Expand Up @@ -1152,7 +1152,7 @@ void SimplexBase::removeLastConstraintRowOrientation() {
//
// If we have a variable, then the column has zero coefficients for every row
// iff no constraints have been added with a non-zero coefficient for this row.
Optional<unsigned> SimplexBase::findAnyPivotRow(unsigned col) {
std::optional<unsigned> SimplexBase::findAnyPivotRow(unsigned col) {
for (unsigned row = nRedundant, e = getNumRows(); row < e; ++row)
if (tableau(row, col) != 0)
return row;
Expand All @@ -1173,13 +1173,14 @@ void Simplex::undoLastConstraint() {
// coefficient for the column. findAnyPivotRow will always be able to
// find such a row for a constraint.
unsigned column = con.back().pos;
if (Optional<unsigned> maybeRow = findPivotRow({}, Direction::Up, column)) {
if (std::optional<unsigned> maybeRow =
findPivotRow({}, Direction::Up, column)) {
pivot(*maybeRow, column);
} else if (Optional<unsigned> maybeRow =
} else if (std::optional<unsigned> maybeRow =
findPivotRow({}, Direction::Down, column)) {
pivot(*maybeRow, column);
} else {
Optional<unsigned> row = findAnyPivotRow(column);
std::optional<unsigned> row = findAnyPivotRow(column);
assert(row && "Pivot should always exist for a constraint!");
pivot(*row, column);
}
Expand All @@ -1198,7 +1199,7 @@ void LexSimplexBase::undoLastConstraint() {
// snapshot, so what pivots we perform while undoing doesn't matter as
// long as we get the unknown to row orientation and remove it.
unsigned column = con.back().pos;
Optional<unsigned> row = findAnyPivotRow(column);
std::optional<unsigned> row = findAnyPivotRow(column);
assert(row && "Pivot should always exist for a constraint!");
pivot(*row, column);
}
Expand Down Expand Up @@ -1324,7 +1325,7 @@ void SimplexBase::intersectIntegerRelation(const IntegerRelation &rel) {
MaybeOptimum<Fraction> Simplex::computeRowOptimum(Direction direction,
unsigned row) {
// Keep trying to find a pivot for the row in the specified direction.
while (Optional<Pivot> maybePivot = findPivot(row, direction)) {
while (std::optional<Pivot> maybePivot = findPivot(row, direction)) {
// If findPivot returns a pivot involving the row itself, then the optimum
// is unbounded, so we return std::nullopt.
if (maybePivot->row == row)
Expand Down Expand Up @@ -1357,7 +1358,7 @@ MaybeOptimum<Fraction> Simplex::computeOptimum(Direction direction,
return OptimumKind::Empty;
if (u.orientation == Orientation::Column) {
unsigned column = u.pos;
Optional<unsigned> pivotRow = findPivotRow({}, direction, column);
std::optional<unsigned> pivotRow = findPivotRow({}, direction, column);
// If no pivot is returned, the constraint is unbounded in the specified
// direction.
if (!pivotRow)
Expand Down Expand Up @@ -1424,7 +1425,8 @@ void Simplex::detectRedundant(unsigned offset, unsigned count) {
Unknown &u = con[offset + i];
if (u.orientation == Orientation::Column) {
unsigned column = u.pos;
Optional<unsigned> pivotRow = findPivotRow({}, Direction::Down, column);
std::optional<unsigned> pivotRow =
findPivotRow({}, Direction::Down, column);
// If no downward pivot is returned, the constraint is unbounded below
// and hence not redundant.
if (!pivotRow)
Expand Down Expand Up @@ -1546,7 +1548,7 @@ Simplex Simplex::makeProduct(const Simplex &a, const Simplex &b) {
return result;
}

Optional<SmallVector<Fraction, 8>> Simplex::getRationalSample() const {
std::optional<SmallVector<Fraction, 8>> Simplex::getRationalSample() const {
if (empty)
return {};

Expand Down Expand Up @@ -1601,7 +1603,7 @@ MaybeOptimum<SmallVector<Fraction, 8>> LexSimplex::getRationalSample() const {
return sample;
}

Optional<SmallVector<MPInt, 8>> Simplex::getSamplePointIfIntegral() const {
std::optional<SmallVector<MPInt, 8>> Simplex::getSamplePointIfIntegral() const {
// If the tableau is empty, no sample point exists.
if (empty)
return {};
Expand Down Expand Up @@ -1969,7 +1971,7 @@ void Simplex::reduceBasis(Matrix &basis, unsigned level) {
///
/// To avoid potentially arbitrarily large recursion depths leading to stack
/// overflows, this algorithm is implemented iteratively.
Optional<SmallVector<MPInt, 8>> Simplex::findIntegerSample() {
std::optional<SmallVector<MPInt, 8>> Simplex::findIntegerSample() {
if (empty)
return {};

Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Analysis/Presburger/Utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -378,11 +378,11 @@ SmallVector<MPInt, 8> presburger::getComplementIneq(ArrayRef<MPInt> ineq) {
return coeffs;
}

SmallVector<Optional<MPInt>, 4>
SmallVector<std::optional<MPInt>, 4>
DivisionRepr::divValuesAt(ArrayRef<MPInt> point) const {
assert(point.size() == getNumNonDivs() && "Incorrect point size");

SmallVector<Optional<MPInt>, 4> divValues(getNumDivs(), std::nullopt);
SmallVector<std::optional<MPInt>, 4> divValues(getNumDivs(), std::nullopt);
bool changed = true;
while (changed) {
changed = false;
Expand Down
2 changes: 1 addition & 1 deletion mlir/lib/AsmParser/AsmParserImpl.h
Original file line number Diff line number Diff line change
Expand Up @@ -287,7 +287,7 @@ class AsmParserImpl : public BaseT {

// Check for a hexadecimal float value.
if (curTok.is(Token::integer)) {
Optional<APFloat> apResult;
std::optional<APFloat> apResult;
if (failed(parser.parseFloatFromIntegerLiteral(
apResult, curTok, isNegative, APFloat::IEEEdouble(),
/*typeSizeInBits=*/64)))
Expand Down
28 changes: 14 additions & 14 deletions mlir/lib/AsmParser/AttributeParser.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -356,8 +356,8 @@ Attribute Parser::parseFloatAttr(Type type, bool isNegative) {

/// Construct an APint from a parsed value, a known attribute type and
/// sign.
static Optional<APInt> buildAttributeAPInt(Type type, bool isNegative,
StringRef spelling) {
static std::optional<APInt> buildAttributeAPInt(Type type, bool isNegative,
StringRef spelling) {
// Parse the integer value into an APInt that is big enough to hold the value.
APInt result;
bool isHex = spelling.size() > 1 && spelling[1] == 'x';
Expand Down Expand Up @@ -417,7 +417,7 @@ Attribute Parser::parseDecOrHexAttr(Type type, bool isNegative) {
}

if (auto floatType = type.dyn_cast<FloatType>()) {
Optional<APFloat> result;
std::optional<APFloat> result;
if (failed(parseFloatFromIntegerLiteral(result, tok, isNegative,
floatType.getFloatSemantics(),
floatType.getWidth())))
Expand All @@ -435,7 +435,7 @@ Attribute Parser::parseDecOrHexAttr(Type type, bool isNegative) {
return nullptr;
}

Optional<APInt> apInt = buildAttributeAPInt(type, isNegative, spelling);
std::optional<APInt> apInt = buildAttributeAPInt(type, isNegative, spelling);
if (!apInt)
return emitError(loc, "integer constant out of range for attribute"),
nullptr;
Expand All @@ -450,7 +450,7 @@ Attribute Parser::parseDecOrHexAttr(Type type, bool isNegative) {
/// stored into 'result'.
static ParseResult parseElementAttrHexValues(Parser &parser, Token tok,
std::string &result) {
if (Optional<std::string> value = tok.getHexStringValue()) {
if (std::optional<std::string> value = tok.getHexStringValue()) {
result = std::move(*value);
return success();
}
Expand Down Expand Up @@ -636,7 +636,7 @@ TensorLiteralParser::getIntAttrElements(SMLoc loc, Type eltTy,
}

// Create APInt values for each element with the correct bitwidth.
Optional<APInt> apInt =
std::optional<APInt> apInt =
buildAttributeAPInt(eltTy, isNegative, token.getSpelling());
if (!apInt)
return p.emitError(tokenLoc, "integer constant out of range for type");
Expand All @@ -656,7 +656,7 @@ TensorLiteralParser::getFloatAttrElements(SMLoc loc, FloatType eltTy,

// Handle hexadecimal float literals.
if (token.is(Token::integer) && token.getSpelling().startswith("0x")) {
Optional<APFloat> result;
std::optional<APFloat> result;
if (failed(p.parseFloatFromIntegerLiteral(result, token, isNegative,
eltTy.getFloatSemantics(),
eltTy.getWidth())))
Expand Down Expand Up @@ -880,7 +880,7 @@ ParseResult DenseArrayElementParser::parseIntegerElement(Parser &p) {
bool isNegative = p.consumeIf(Token::minus);

// Parse an integer literal as an APInt.
Optional<APInt> value;
std::optional<APInt> value;
StringRef spelling = p.getToken().getSpelling();
if (p.getToken().isAny(Token::kw_true, Token::kw_false)) {
if (!type.isInteger(1))
Expand All @@ -903,7 +903,7 @@ ParseResult DenseArrayElementParser::parseFloatElement(Parser &p) {
bool isNegative = p.consumeIf(Token::minus);

Token token = p.getToken();
Optional<APFloat> result;
std::optional<APFloat> result;
auto floatType = type.cast<FloatType>();
if (p.consumeIf(Token::integer)) {
// Parse an integer literal as a float.
Expand All @@ -913,7 +913,7 @@ ParseResult DenseArrayElementParser::parseFloatElement(Parser &p) {
return failure();
} else if (p.consumeIf(Token::floatliteral)) {
// Parse a floating point literal.
Optional<double> val = token.getFloatingPointValue();
std::optional<double> val = token.getFloatingPointValue();
if (!val)
return failure();
result = APFloat(isNegative ? -*val : *val);
Expand Down Expand Up @@ -1150,7 +1150,7 @@ Attribute Parser::parseStridedLayoutAttr() {
// Parses either an integer token or a question mark token. Reports an error
// and returns std::nullopt if the current token is neither. The integer token
// must fit into int64_t limits.
auto parseStrideOrOffset = [&]() -> Optional<int64_t> {
auto parseStrideOrOffset = [&]() -> std::optional<int64_t> {
if (consumeIf(Token::question))
return ShapedType::kDynamic;

Expand All @@ -1163,7 +1163,7 @@ Attribute Parser::parseStridedLayoutAttr() {
bool negative = consumeIf(Token::minus);

if (getToken().is(Token::integer)) {
Optional<uint64_t> value = getToken().getUInt64IntegerValue();
std::optional<uint64_t> value = getToken().getUInt64IntegerValue();
if (!value ||
*value > static_cast<uint64_t>(std::numeric_limits<int64_t>::max()))
return emitWrongTokenError();
Expand All @@ -1182,7 +1182,7 @@ Attribute Parser::parseStridedLayoutAttr() {
SmallVector<int64_t> strides;
if (!getToken().is(Token::r_square)) {
do {
Optional<int64_t> stride = parseStrideOrOffset();
std::optional<int64_t> stride = parseStrideOrOffset();
if (!stride)
return nullptr;
strides.push_back(*stride);
Expand All @@ -1205,7 +1205,7 @@ Attribute Parser::parseStridedLayoutAttr() {
failed(parseToken(Token::colon, "expected ':' after 'offset'")))
return nullptr;

Optional<int64_t> offset = parseStrideOrOffset();
std::optional<int64_t> offset = parseStrideOrOffset();
if (!offset || failed(parseToken(Token::greater, "expected '>'")))
return nullptr;

Expand Down
44 changes: 23 additions & 21 deletions mlir/lib/AsmParser/Parser.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -276,7 +276,7 @@ OptionalParseResult Parser::parseOptionalInteger(APInt &result) {

/// Parse a floating point value from an integer literal token.
ParseResult Parser::parseFloatFromIntegerLiteral(
Optional<APFloat> &result, const Token &tok, bool isNegative,
std::optional<APFloat> &result, const Token &tok, bool isNegative,
const llvm::fltSemantics &semantics, size_t typeSizeInBits) {
SMLoc loc = tok.getLoc();
StringRef spelling = tok.getSpelling();
Expand All @@ -292,7 +292,7 @@ ParseResult Parser::parseFloatFromIntegerLiteral(
"leading minus");
}

Optional<uint64_t> value = tok.getUInt64IntegerValue();
std::optional<uint64_t> value = tok.getUInt64IntegerValue();
if (!value)
return emitError(loc, "hexadecimal float constant out of range for type");

Expand Down Expand Up @@ -534,12 +534,13 @@ class OperationParser : public Parser {
/// skip parsing that component.
ParseResult parseGenericOperationAfterOpName(
OperationState &result,
Optional<ArrayRef<UnresolvedOperand>> parsedOperandUseInfo = std::nullopt,
Optional<ArrayRef<Block *>> parsedSuccessors = std::nullopt,
Optional<MutableArrayRef<std::unique_ptr<Region>>> parsedRegions =
std::optional<ArrayRef<UnresolvedOperand>> parsedOperandUseInfo =
std::nullopt,
Optional<ArrayRef<NamedAttribute>> parsedAttributes = std::nullopt,
Optional<FunctionType> parsedFnType = std::nullopt);
std::optional<ArrayRef<Block *>> parsedSuccessors = std::nullopt,
std::optional<MutableArrayRef<std::unique_ptr<Region>>> parsedRegions =
std::nullopt,
std::optional<ArrayRef<NamedAttribute>> parsedAttributes = std::nullopt,
std::optional<FunctionType> parsedFnType = std::nullopt);

/// Parse an operation instance that is in the generic form and insert it at
/// the provided insertion point.
Expand Down Expand Up @@ -1250,11 +1251,11 @@ struct CleanupOpStateRegions {

ParseResult OperationParser::parseGenericOperationAfterOpName(
OperationState &result,
Optional<ArrayRef<UnresolvedOperand>> parsedOperandUseInfo,
Optional<ArrayRef<Block *>> parsedSuccessors,
Optional<MutableArrayRef<std::unique_ptr<Region>>> parsedRegions,
Optional<ArrayRef<NamedAttribute>> parsedAttributes,
Optional<FunctionType> parsedFnType) {
std::optional<ArrayRef<UnresolvedOperand>> parsedOperandUseInfo,
std::optional<ArrayRef<Block *>> parsedSuccessors,
std::optional<MutableArrayRef<std::unique_ptr<Region>>> parsedRegions,
std::optional<ArrayRef<NamedAttribute>> parsedAttributes,
std::optional<FunctionType> parsedFnType) {

// Parse the operand list, if not explicitly provided.
SmallVector<UnresolvedOperand, 8> opInfo;
Expand Down Expand Up @@ -1436,7 +1437,8 @@ class CustomOpAsmParser : public AsmParserImpl<OpAsmParser> {
// This can happen if an attribute set during parsing is also specified in
// the attribute dictionary in the assembly, or the attribute is set
// multiple during parsing.
Optional<NamedAttribute> duplicate = opState.attributes.findDuplicate();
std::optional<NamedAttribute> duplicate =
opState.attributes.findDuplicate();
if (duplicate)
return emitError(getNameLoc(), "attribute '")
<< duplicate->getName().getValue()
Expand All @@ -1455,11 +1457,11 @@ class CustomOpAsmParser : public AsmParserImpl<OpAsmParser> {

ParseResult parseGenericOperationAfterOpName(
OperationState &result,
Optional<ArrayRef<UnresolvedOperand>> parsedUnresolvedOperands,
Optional<ArrayRef<Block *>> parsedSuccessors,
Optional<MutableArrayRef<std::unique_ptr<Region>>> parsedRegions,
Optional<ArrayRef<NamedAttribute>> parsedAttributes,
Optional<FunctionType> parsedFnType) final {
std::optional<ArrayRef<UnresolvedOperand>> parsedUnresolvedOperands,
std::optional<ArrayRef<Block *>> parsedSuccessors,
std::optional<MutableArrayRef<std::unique_ptr<Region>>> parsedRegions,
std::optional<ArrayRef<NamedAttribute>> parsedAttributes,
std::optional<FunctionType> parsedFnType) final {
return parser.parseGenericOperationAfterOpName(
result, parsedUnresolvedOperands, parsedSuccessors, parsedRegions,
parsedAttributes, parsedFnType);
Expand Down Expand Up @@ -1777,7 +1779,7 @@ class CustomOpAsmParser : public AsmParserImpl<OpAsmParser> {

/// Parse a loc(...) specifier if present, filling in result if so.
ParseResult
parseOptionalLocationSpecifier(Optional<Location> &result) override {
parseOptionalLocationSpecifier(std::optional<Location> &result) override {
// If there is a 'loc' we parse a trailing location.
if (!parser.consumeIf(Token::kw_loc))
return success();
Expand Down Expand Up @@ -1825,7 +1827,7 @@ FailureOr<OperationName> OperationParser::parseCustomOperationName() {
consumeToken();

// Check to see if this operation name is already registered.
Optional<RegisteredOperationName> opInfo =
std::optional<RegisteredOperationName> opInfo =
RegisteredOperationName::lookup(opName, getContext());
if (opInfo)
return *opInfo;
Expand Down Expand Up @@ -2395,7 +2397,7 @@ class ParsedResourceEntry : public AsmParsedResourceEntry {
// Blob data within then textual format is represented as a hex string.
// TODO: We could avoid an additional alloc+copy here if we pre-allocated
// the buffer to use during hex processing.
Optional<std::string> blobData =
std::optional<std::string> blobData =
value.is(Token::string) ? value.getHexStringValue() : std::nullopt;
if (!blobData)
return p.emitError(value.getLoc(),
Expand Down
2 changes: 1 addition & 1 deletion mlir/lib/AsmParser/Parser.h
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,7 @@ class Parser {
OptionalParseResult parseOptionalInteger(APInt &result);

/// Parse a floating point value from an integer literal token.
ParseResult parseFloatFromIntegerLiteral(Optional<APFloat> &result,
ParseResult parseFloatFromIntegerLiteral(std::optional<APFloat> &result,
const Token &tok, bool isNegative,
const llvm::fltSemantics &semantics,
size_t typeSizeInBits);
Expand Down
14 changes: 7 additions & 7 deletions mlir/lib/AsmParser/Token.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ SMRange Token::getLocRange() const { return SMRange(getLoc(), getEndLoc()); }

/// For an integer token, return its value as an unsigned. If it doesn't fit,
/// return std::nullopt.
Optional<unsigned> Token::getUnsignedIntegerValue() const {
std::optional<unsigned> Token::getUnsignedIntegerValue() const {
bool isHex = spelling.size() > 1 && spelling[1] == 'x';

unsigned result = 0;
Expand All @@ -37,7 +37,7 @@ Optional<unsigned> Token::getUnsignedIntegerValue() const {

/// For an integer token, return its value as a uint64_t. If it doesn't fit,
/// return std::nullopt.
Optional<uint64_t> Token::getUInt64IntegerValue(StringRef spelling) {
std::optional<uint64_t> Token::getUInt64IntegerValue(StringRef spelling) {
bool isHex = spelling.size() > 1 && spelling[1] == 'x';

uint64_t result = 0;
Expand All @@ -48,15 +48,15 @@ Optional<uint64_t> Token::getUInt64IntegerValue(StringRef spelling) {

/// For a floatliteral, return its value as a double. Return std::nullopt if the
/// value underflows or overflows.
Optional<double> Token::getFloatingPointValue() const {
std::optional<double> Token::getFloatingPointValue() const {
  // StringRef::getAsDouble reports failure (malformed input, or a value that
  // over/underflows a double) by returning true; in that case there is no
  // value to hand back.
  double parsed = 0.0;
  const bool parseFailed = spelling.getAsDouble(parsed);
  if (parseFailed)
    return std::nullopt;
  return parsed;
}

/// For an inttype token, return its bitwidth.
Optional<unsigned> Token::getIntTypeBitwidth() const {
std::optional<unsigned> Token::getIntTypeBitwidth() const {
assert(getKind() == inttype);
unsigned bitwidthStart = (spelling[0] == 'i' ? 1 : 2);
unsigned result = 0;
Expand All @@ -65,7 +65,7 @@ Optional<unsigned> Token::getIntTypeBitwidth() const {
return result;
}

Optional<bool> Token::getIntTypeSignedness() const {
std::optional<bool> Token::getIntTypeSignedness() const {
assert(getKind() == inttype);
if (spelling[0] == 'i')
return std::nullopt;
Expand Down Expand Up @@ -127,7 +127,7 @@ std::string Token::getStringValue() const {

/// Given a token containing a hex string literal, return its value or
/// std::nullopt if the token does not contain a valid hex string.
Optional<std::string> Token::getHexStringValue() const {
std::optional<std::string> Token::getHexStringValue() const {
assert(getKind() == string);

// Get the internal string data, without the quotes.
Expand Down Expand Up @@ -158,7 +158,7 @@ std::string Token::getSymbolReference() const {
/// Given a hash_identifier token like #123, try to parse the number out of
/// the identifier, returning std::nullopt if it is a named identifier like #x
/// or if the integer doesn't fit.
Optional<unsigned> Token::getHashIdentifierNumber() const {
std::optional<unsigned> Token::getHashIdentifierNumber() const {
assert(getKind() == hash_identifier);
unsigned result = 0;
if (spelling.drop_front().getAsInteger(10, result))
Expand Down
16 changes: 8 additions & 8 deletions mlir/lib/AsmParser/Token.h
Original file line number Diff line number Diff line change
Expand Up @@ -76,31 +76,31 @@ class Token {

/// For an integer token, return its value as an unsigned. If it doesn't fit,
/// return std::nullopt.
Optional<unsigned> getUnsignedIntegerValue() const;
std::optional<unsigned> getUnsignedIntegerValue() const;

/// For an integer token, return its value as an uint64_t. If it doesn't fit,
/// return std::nullopt.
static Optional<uint64_t> getUInt64IntegerValue(StringRef spelling);
Optional<uint64_t> getUInt64IntegerValue() const {
static std::optional<uint64_t> getUInt64IntegerValue(StringRef spelling);
std::optional<uint64_t> getUInt64IntegerValue() const {
return getUInt64IntegerValue(getSpelling());
}

/// For a floatliteral token, return its value as a double. Returns
/// std::nullopt in the case of underflow or overflow.
Optional<double> getFloatingPointValue() const;
std::optional<double> getFloatingPointValue() const;

/// For an inttype token, return its bitwidth.
Optional<unsigned> getIntTypeBitwidth() const;
std::optional<unsigned> getIntTypeBitwidth() const;

/// For an inttype token, return its signedness semantics: std::nullopt means
/// no signedness semantics; true means signed integer type; false means
/// unsigned integer type.
Optional<bool> getIntTypeSignedness() const;
std::optional<bool> getIntTypeSignedness() const;

/// Given a hash_identifier token like #123, try to parse the number out of
/// the identifier, returning std::nullopt if it is a named identifier like #x
/// or if the integer doesn't fit.
Optional<unsigned> getHashIdentifierNumber() const;
std::optional<unsigned> getHashIdentifierNumber() const;

/// Given a token containing a string literal, return its value, including
/// removing the quote characters and unescaping the contents of the string.
Expand All @@ -110,7 +110,7 @@ class Token {
/// std::nullopt if the token does not contain a valid hex string. A hex
/// string literal is a string starting with `0x` and only containing hex
/// digits.
Optional<std::string> getHexStringValue() const;
std::optional<std::string> getHexStringValue() const;

/// Given a token containing a symbol reference, return the unescaped string
/// value.
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/AsmParser/TypeParser.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -281,7 +281,7 @@ Type Parser::parseNonFunctionType() {
}

IntegerType::SignednessSemantics signSemantics = IntegerType::Signless;
if (Optional<bool> signedness = getToken().getIntTypeSignedness())
if (std::optional<bool> signedness = getToken().getIntTypeSignedness())
signSemantics = *signedness ? IntegerType::Signed : IntegerType::Unsigned;

consumeToken(Token::inttype);
Expand Down Expand Up @@ -561,7 +561,7 @@ ParseResult Parser::parseIntegerInDimensionList(int64_t &value) {
consumeToken();
} else {
// Make sure this integer value is in bound and valid.
Optional<uint64_t> dimension = getToken().getUInt64IntegerValue();
std::optional<uint64_t> dimension = getToken().getUInt64IntegerValue();
if (!dimension ||
*dimension > (uint64_t)std::numeric_limits<int64_t>::max())
return emitError("invalid dimension");
Expand Down
8 changes: 4 additions & 4 deletions mlir/lib/Bindings/Python/DialectSparseTensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -34,8 +34,8 @@ static void populateDialectSparseTensorSubmodule(const py::module &m) {
"get",
[](py::object cls,
std::vector<MlirSparseTensorDimLevelType> dimLevelTypes,
llvm::Optional<MlirAffineMap> dimOrdering,
llvm::Optional<MlirAffineMap> higherOrdering, int pointerBitWidth,
std::optional<MlirAffineMap> dimOrdering,
std::optional<MlirAffineMap> higherOrdering, int pointerBitWidth,
int indexBitWidth, MlirContext context) {
return cls(mlirSparseTensorEncodingAttrGet(
context, dimLevelTypes.size(), dimLevelTypes.data(),
Expand All @@ -60,7 +60,7 @@ static void populateDialectSparseTensorSubmodule(const py::module &m) {
})
.def_property_readonly(
"dim_ordering",
[](MlirAttribute self) -> llvm::Optional<MlirAffineMap> {
[](MlirAttribute self) -> std::optional<MlirAffineMap> {
MlirAffineMap ret =
mlirSparseTensorEncodingAttrGetDimOrdering(self);
if (mlirAffineMapIsNull(ret))
Expand All @@ -69,7 +69,7 @@ static void populateDialectSparseTensorSubmodule(const py::module &m) {
})
.def_property_readonly(
"higher_ordering",
[](MlirAttribute self) -> llvm::Optional<MlirAffineMap> {
[](MlirAttribute self) -> std::optional<MlirAffineMap> {
MlirAffineMap ret =
mlirSparseTensorEncodingAttrGetHigherOrdering(self);
if (mlirAffineMapIsNull(ret))
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Bindings/Python/Globals.h
Original file line number Diff line number Diff line change
Expand Up @@ -84,12 +84,12 @@ class PyGlobals {

/// Looks up a registered dialect class by namespace. Note that this may
/// trigger loading of the defining module and can arbitrarily re-enter.
llvm::Optional<pybind11::object>
std::optional<pybind11::object>
lookupDialectClass(const std::string &dialectNamespace);

/// Looks up a registered raw OpView class by operation name. Note that this
/// may trigger a load of the dialect, which can arbitrarily re-enter.
llvm::Optional<pybind11::object>
std::optional<pybind11::object>
lookupRawOpViewClass(llvm::StringRef operationName);

private:
Expand Down
7 changes: 4 additions & 3 deletions mlir/lib/Bindings/Python/IRAttributes.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -546,8 +546,9 @@ class PyDenseElementsAttribute
using PyConcreteAttribute::PyConcreteAttribute;

static PyDenseElementsAttribute
getFromBuffer(py::buffer array, bool signless, Optional<PyType> explicitType,
Optional<std::vector<int64_t>> explicitShape,
getFromBuffer(py::buffer array, bool signless,
std::optional<PyType> explicitType,
std::optional<std::vector<int64_t>> explicitShape,
DefaultingPyMlirContext contextWrapper) {
// Request a contiguous view. In exotic cases, this will cause a copy.
int flags = PyBUF_C_CONTIGUOUS | PyBUF_FORMAT;
Expand All @@ -573,7 +574,7 @@ class PyDenseElementsAttribute
// Notably, this excludes, bool (which needs to be bit-packed) and
// other exotics which do not have a direct representation in the buffer
// protocol (i.e. complex, etc).
Optional<MlirType> bulkLoadElementType;
std::optional<MlirType> bulkLoadElementType;
if (explicitType) {
bulkLoadElementType = *explicitType;
} else if (arrayInfo.format == "f") {
Expand Down
38 changes: 20 additions & 18 deletions mlir/lib/Bindings/Python/IRCore.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1066,7 +1066,7 @@ void PyOperation::checkValid() const {
}

void PyOperationBase::print(py::object fileObject, bool binary,
llvm::Optional<int64_t> largeElementsLimit,
std::optional<int64_t> largeElementsLimit,
bool enableDebugInfo, bool prettyDebugInfo,
bool printGenericOpForm, bool useLocalScope,
bool assumeVerified) {
Expand Down Expand Up @@ -1112,7 +1112,7 @@ void PyOperationBase::writeBytecode(const py::object &fileObject) {
}

py::object PyOperationBase::getAsm(bool binary,
llvm::Optional<int64_t> largeElementsLimit,
std::optional<int64_t> largeElementsLimit,
bool enableDebugInfo, bool prettyDebugInfo,
bool printGenericOpForm, bool useLocalScope,
bool assumeVerified) {
Expand Down Expand Up @@ -1151,7 +1151,7 @@ void PyOperationBase::moveBefore(PyOperationBase &other) {
operation.parentKeepAlive = otherOp.parentKeepAlive;
}

llvm::Optional<PyOperationRef> PyOperation::getParentOperation() {
std::optional<PyOperationRef> PyOperation::getParentOperation() {
checkValid();
if (!isAttached())
throw SetPyError(PyExc_ValueError, "Detached operations have no parent");
Expand All @@ -1163,7 +1163,7 @@ llvm::Optional<PyOperationRef> PyOperation::getParentOperation() {

PyBlock PyOperation::getBlock() {
checkValid();
llvm::Optional<PyOperationRef> parentOperation = getParentOperation();
std::optional<PyOperationRef> parentOperation = getParentOperation();
MlirBlock block = mlirOperationGetBlock(get());
assert(!mlirBlockIsNull(block) && "Attached operation has null parent");
assert(parentOperation && "Operation has no parent");
Expand Down Expand Up @@ -1199,12 +1199,13 @@ static void maybeInsertOperation(PyOperationRef &op,
}
}

py::object PyOperation::create(
const std::string &name, llvm::Optional<std::vector<PyType *>> results,
llvm::Optional<std::vector<PyValue *>> operands,
llvm::Optional<py::dict> attributes,
llvm::Optional<std::vector<PyBlock *>> successors, int regions,
DefaultingPyLocation location, const py::object &maybeIp) {
py::object PyOperation::create(const std::string &name,
std::optional<std::vector<PyType *>> results,
std::optional<std::vector<PyValue *>> operands,
std::optional<py::dict> attributes,
std::optional<std::vector<PyBlock *>> successors,
int regions, DefaultingPyLocation location,
const py::object &maybeIp) {
llvm::SmallVector<MlirValue, 4> mlirOperands;
llvm::SmallVector<MlirType, 4> mlirResults;
llvm::SmallVector<MlirBlock, 4> mlirSuccessors;
Expand Down Expand Up @@ -1357,12 +1358,13 @@ void PyOperation::erase() {
// PyOpView
//------------------------------------------------------------------------------

py::object PyOpView::buildGeneric(
const py::object &cls, py::list resultTypeList, py::list operandList,
llvm::Optional<py::dict> attributes,
llvm::Optional<std::vector<PyBlock *>> successors,
llvm::Optional<int> regions, DefaultingPyLocation location,
const py::object &maybeIp) {
py::object
PyOpView::buildGeneric(const py::object &cls, py::list resultTypeList,
py::list operandList, std::optional<py::dict> attributes,
std::optional<std::vector<PyBlock *>> successors,
std::optional<int> regions,
DefaultingPyLocation location,
const py::object &maybeIp) {
PyMlirContextRef context = location->getContext();
// Class level operation construction metadata.
std::string name = py::cast<std::string>(cls.attr("OPERATION_NAME"));
Expand Down Expand Up @@ -2518,7 +2520,7 @@ void mlir::python::populateIRCore(py::module &m) {
.def_static(
"fused",
[](const std::vector<PyLocation> &pyLocations,
llvm::Optional<PyAttribute> metadata,
std::optional<PyAttribute> metadata,
DefaultingPyMlirContext context) {
llvm::SmallVector<MlirLocation, 4> locations;
locations.reserve(pyLocations.size());
Expand All @@ -2533,7 +2535,7 @@ void mlir::python::populateIRCore(py::module &m) {
py::arg("context") = py::none(), kContextGetFusedLocationDocstring)
.def_static(
"name",
[](std::string name, llvm::Optional<PyLocation> childLoc,
[](std::string name, std::optional<PyLocation> childLoc,
DefaultingPyMlirContext context) {
return PyLocation(
context->getRef(),
Expand Down
6 changes: 3 additions & 3 deletions mlir/lib/Bindings/Python/IRInterfaces.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -185,9 +185,9 @@ class PyInferTypeOpInterface
/// Given the arguments required to build an operation, attempts to infer its
/// return types. Throws value_error on faliure.
std::vector<PyType>
inferReturnTypes(llvm::Optional<std::vector<PyValue>> operands,
llvm::Optional<PyAttribute> attributes,
llvm::Optional<std::vector<PyRegion>> regions,
inferReturnTypes(std::optional<std::vector<PyValue>> operands,
std::optional<PyAttribute> attributes,
std::optional<std::vector<PyRegion>> regions,
DefaultingPyMlirContext context,
DefaultingPyLocation location) {
llvm::SmallVector<MlirValue> mlirOperands;
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Bindings/Python/IRModule.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,7 @@ PyGlobals::lookupAttributeBuilder(const std::string &attributeKind) {
return std::nullopt;
}

llvm::Optional<py::object>
std::optional<py::object>
PyGlobals::lookupDialectClass(const std::string &dialectNamespace) {
loadDialectModule(dialectNamespace);
// Fast match against the class map first (common case).
Expand All @@ -129,7 +129,7 @@ PyGlobals::lookupDialectClass(const std::string &dialectNamespace) {
return std::nullopt;
}

llvm::Optional<pybind11::object>
std::optional<pybind11::object>
PyGlobals::lookupRawOpViewClass(llvm::StringRef operationName) {
{
auto foundIt = rawOpViewClassMapCache.find(operationName);
Expand Down
26 changes: 13 additions & 13 deletions mlir/lib/Bindings/Python/IRModule.h
Original file line number Diff line number Diff line change
Expand Up @@ -347,7 +347,7 @@ class PyDiagnosticHandler {
private:
MlirContext context;
pybind11::object callback;
llvm::Optional<MlirDiagnosticHandlerID> registeredID;
std::optional<MlirDiagnosticHandlerID> registeredID;
bool hadError = false;
friend class PyMlirContext;
};
Expand Down Expand Up @@ -504,11 +504,11 @@ class PyOperationBase {
virtual ~PyOperationBase() = default;
/// Implements the bound 'print' method and helps with others.
void print(pybind11::object fileObject, bool binary,
llvm::Optional<int64_t> largeElementsLimit, bool enableDebugInfo,
std::optional<int64_t> largeElementsLimit, bool enableDebugInfo,
bool prettyDebugInfo, bool printGenericOpForm, bool useLocalScope,
bool assumeVerified);
pybind11::object getAsm(bool binary,
llvm::Optional<int64_t> largeElementsLimit,
std::optional<int64_t> largeElementsLimit,
bool enableDebugInfo, bool prettyDebugInfo,
bool printGenericOpForm, bool useLocalScope,
bool assumeVerified);
Expand Down Expand Up @@ -586,7 +586,7 @@ class PyOperation : public PyOperationBase, public BaseContextObject {

/// Gets the parent operation or raises an exception if the operation has
/// no parent.
llvm::Optional<PyOperationRef> getParentOperation();
std::optional<PyOperationRef> getParentOperation();

/// Gets a capsule wrapping the void* within the MlirOperation.
pybind11::object getCapsule();
Expand All @@ -598,10 +598,10 @@ class PyOperation : public PyOperationBase, public BaseContextObject {

/// Creates an operation. See corresponding python docstring.
static pybind11::object
create(const std::string &name, llvm::Optional<std::vector<PyType *>> results,
llvm::Optional<std::vector<PyValue *>> operands,
llvm::Optional<pybind11::dict> attributes,
llvm::Optional<std::vector<PyBlock *>> successors, int regions,
create(const std::string &name, std::optional<std::vector<PyType *>> results,
std::optional<std::vector<PyValue *>> operands,
std::optional<pybind11::dict> attributes,
std::optional<std::vector<PyBlock *>> successors, int regions,
DefaultingPyLocation location, const pybind11::object &ip);

/// Creates an OpView suitable for this operation.
Expand Down Expand Up @@ -656,9 +656,9 @@ class PyOpView : public PyOperationBase {
static pybind11::object
buildGeneric(const pybind11::object &cls, pybind11::list resultTypeList,
pybind11::list operandList,
llvm::Optional<pybind11::dict> attributes,
llvm::Optional<std::vector<PyBlock *>> successors,
llvm::Optional<int> regions, DefaultingPyLocation location,
std::optional<pybind11::dict> attributes,
std::optional<std::vector<PyBlock *>> successors,
std::optional<int> regions, DefaultingPyLocation location,
const pybind11::object &maybeIp);

private:
Expand Down Expand Up @@ -738,10 +738,10 @@ class PyInsertionPoint {
private:
// Trampoline constructor that avoids null initializing members while
// looking up parents.
PyInsertionPoint(PyBlock block, llvm::Optional<PyOperationRef> refOperation)
PyInsertionPoint(PyBlock block, std::optional<PyOperationRef> refOperation)
: refOperation(std::move(refOperation)), block(std::move(block)) {}

llvm::Optional<PyOperationRef> refOperation;
std::optional<PyOperationRef> refOperation;
PyBlock block;
};
/// Wrapper around the generic MlirType.
Expand Down
6 changes: 2 additions & 4 deletions mlir/lib/Bindings/Python/IRTypes.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -401,8 +401,7 @@ class PyRankedTensorType
c.def_static(
"get",
[](std::vector<int64_t> shape, PyType &elementType,
llvm::Optional<PyAttribute> &encodingAttr,
DefaultingPyLocation loc) {
std::optional<PyAttribute> &encodingAttr, DefaultingPyLocation loc) {
MlirType t = mlirRankedTensorTypeGetChecked(
loc, shape.size(), shape.data(), elementType,
encodingAttr ? encodingAttr->get() : mlirAttributeGetNull());
Expand All @@ -423,8 +422,7 @@ class PyRankedTensorType
py::arg("encoding") = py::none(), py::arg("loc") = py::none(),
"Create a ranked tensor type");
c.def_property_readonly(
"encoding",
[](PyRankedTensorType &self) -> llvm::Optional<PyAttribute> {
"encoding", [](PyRankedTensorType &self) -> std::optional<PyAttribute> {
MlirAttribute encoding = mlirRankedTensorTypeGetEncoding(self.get());
if (mlirAttributeIsNull(encoding))
return std::nullopt;
Expand Down
11 changes: 6 additions & 5 deletions mlir/lib/Bytecode/Reader/BytecodeReader.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1121,8 +1121,8 @@ class BytecodeReader {
// Resource Section

LogicalResult
parseResourceSection(Optional<ArrayRef<uint8_t>> resourceData,
Optional<ArrayRef<uint8_t>> resourceOffsetData);
parseResourceSection(std::optional<ArrayRef<uint8_t>> resourceData,
std::optional<ArrayRef<uint8_t>> resourceOffsetData);

//===--------------------------------------------------------------------===//
// IR Section
Expand Down Expand Up @@ -1269,7 +1269,8 @@ LogicalResult BytecodeReader::read(llvm::MemoryBufferRef buffer, Block *block) {
});

// Parse the raw data for each of the top-level sections of the bytecode.
Optional<ArrayRef<uint8_t>> sectionDatas[bytecode::Section::kNumSections];
std::optional<ArrayRef<uint8_t>>
sectionDatas[bytecode::Section::kNumSections];
while (!reader.empty()) {
// Read the next section from the bytecode.
bytecode::Section::ID sectionID;
Expand Down Expand Up @@ -1389,8 +1390,8 @@ FailureOr<OperationName> BytecodeReader::parseOpName(EncodingReader &reader) {
// Resource Section

LogicalResult BytecodeReader::parseResourceSection(
Optional<ArrayRef<uint8_t>> resourceData,
Optional<ArrayRef<uint8_t>> resourceOffsetData) {
std::optional<ArrayRef<uint8_t>> resourceData,
std::optional<ArrayRef<uint8_t>> resourceOffsetData) {
// Ensure both sections are either present or not.
if (resourceData.has_value() != resourceOffsetData.has_value()) {
if (resourceOffsetData)
Expand Down
2 changes: 1 addition & 1 deletion mlir/lib/CAPI/IR/IR.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -289,7 +289,7 @@ void mlirOperationStateEnableResultTypeInference(MlirOperationState *state) {

static LogicalResult inferOperationTypes(OperationState &state) {
MLIRContext *context = state.getContext();
Optional<RegisteredOperationName> info = state.name.getRegisteredInfo();
std::optional<RegisteredOperationName> info = state.name.getRegisteredInfo();
if (!info) {
emitError(state.location)
<< "type inference was requested for the operation " << state.name
Expand Down
7 changes: 4 additions & 3 deletions mlir/lib/CAPI/IR/Pass.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,7 @@ namespace mlir {
class ExternalPass : public Pass {
public:
ExternalPass(TypeID passID, StringRef name, StringRef argument,
StringRef description, Optional<StringRef> opName,
StringRef description, std::optional<StringRef> opName,
ArrayRef<MlirDialectHandle> dependentDialects,
MlirExternalPassCallbacks callbacks, void *userData)
: Pass(passID, opName), id(passID), name(name), argument(argument),
Expand Down Expand Up @@ -143,7 +143,7 @@ class ExternalPass : public Pass {
}

bool canScheduleOn(RegisteredOperationName opName) const override {
if (Optional<StringRef> specifiedOpName = getOpName())
if (std::optional<StringRef> specifiedOpName = getOpName())
return opName.getStringRef() == specifiedOpName;
return true;
}
Expand Down Expand Up @@ -179,7 +179,8 @@ MlirPass mlirCreateExternalPass(MlirTypeID passID, MlirStringRef name,
void *userData) {
return wrap(static_cast<mlir::Pass *>(new mlir::ExternalPass(
unwrap(passID), unwrap(name), unwrap(argument), unwrap(description),
opName.length > 0 ? Optional<StringRef>(unwrap(opName)) : std::nullopt,
opName.length > 0 ? std::optional<StringRef>(unwrap(opName))
: std::nullopt,
{dependentDialects, static_cast<size_t>(nDependentDialects)}, callbacks,
userData)));
}
Expand Down
6 changes: 3 additions & 3 deletions mlir/lib/CAPI/Interfaces/Interfaces.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -19,15 +19,15 @@ using namespace mlir;

bool mlirOperationImplementsInterface(MlirOperation operation,
MlirTypeID interfaceTypeID) {
Optional<RegisteredOperationName> info =
std::optional<RegisteredOperationName> info =
unwrap(operation)->getRegisteredInfo();
return info && info->hasInterface(unwrap(interfaceTypeID));
}

bool mlirOperationImplementsInterfaceStatic(MlirStringRef operationName,
MlirContext context,
MlirTypeID interfaceTypeID) {
Optional<RegisteredOperationName> info = RegisteredOperationName::lookup(
std::optional<RegisteredOperationName> info = RegisteredOperationName::lookup(
StringRef(operationName.data, operationName.length), unwrap(context));
return info && info->hasInterface(unwrap(interfaceTypeID));
}
Expand All @@ -42,7 +42,7 @@ MlirLogicalResult mlirInferTypeOpInterfaceInferReturnTypes(
intptr_t nRegions, MlirRegion *regions, MlirTypesCallback callback,
void *userData) {
StringRef name(opName.data, opName.length);
Optional<RegisteredOperationName> info =
std::optional<RegisteredOperationName> info =
RegisteredOperationName::lookup(name, unwrap(context));
if (!info)
return mlirLogicalResultFailure();
Expand Down
5 changes: 3 additions & 2 deletions mlir/lib/Conversion/AMDGPUToROCDL/AMDGPUToROCDL.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -312,7 +312,8 @@ static Value mfmaConcatIfNeeded(ConversionPatternRewriter &rewriter,
/// Return the `rocdl` intrinsic corresponding to a MFMA operation `mfma`
/// if one exists. This includes checking to ensure the intrinsic is supported
/// on the architecture you are compiling for.
static Optional<StringRef> mfmaOpToIntrinsic(MFMAOp mfma, Chipset chipset) {
static std::optional<StringRef> mfmaOpToIntrinsic(MFMAOp mfma,
Chipset chipset) {
uint32_t m = mfma.getM(), n = mfma.getN(), k = mfma.getK(),
b = mfma.getBlocks();
Type sourceElem = mfma.getSourceA().getType();
Expand Down Expand Up @@ -428,7 +429,7 @@ struct MFMAOpLowering : public ConvertOpToLLVMPattern<MFMAOp> {
getBlgpField |=
op.getNegateA() | (op.getNegateB() << 1) | (op.getNegateC() << 2);
}
Optional<StringRef> maybeIntrinsic = mfmaOpToIntrinsic(op, chipset);
std::optional<StringRef> maybeIntrinsic = mfmaOpToIntrinsic(op, chipset);
if (!maybeIntrinsic.has_value())
return op.emitOpError("no intrinsic matching MFMA size on given chipset");
OperationState loweredOp(loc, *maybeIntrinsic);
Expand Down
8 changes: 4 additions & 4 deletions mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -448,10 +448,10 @@ static Value createGroupReduceOpImpl(OpBuilder &builder, Location loc,
.getResult();
}

static llvm::Optional<Value> createGroupReduceOp(OpBuilder &builder,
Location loc, Value arg,
gpu::AllReduceOperation opType,
bool isGroup, bool isUniform) {
static std::optional<Value> createGroupReduceOp(OpBuilder &builder,
Location loc, Value arg,
gpu::AllReduceOperation opType,
bool isGroup, bool isUniform) {
using FuncT = Value (*)(OpBuilder &, Location, Value, bool, bool);
struct OpHandler {
gpu::AllReduceOperation type;
Expand Down
2 changes: 1 addition & 1 deletion mlir/lib/Conversion/MathToLibm/MathToLibm.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -154,7 +154,7 @@ ScalarOpToLibmCall<Op>::matchAndRewrite(Op op,

void mlir::populateMathToLibmConversionPatterns(
RewritePatternSet &patterns, PatternBenefit benefit,
llvm::Optional<PatternBenefit> log1pBenefit) {
std::optional<PatternBenefit> log1pBenefit) {
patterns.add<VecOpToScalarOp<math::Atan2Op>, VecOpToScalarOp<math::CbrtOp>,
VecOpToScalarOp<math::ExpM1Op>, VecOpToScalarOp<math::TanhOp>,
VecOpToScalarOp<math::CosOp>, VecOpToScalarOp<math::SinOp>,
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ static bool isAllocationSupported(Operation *allocOp, MemRefType type) {
/// Returns the scope to use for atomic operations use for emulating store
/// operations of unsupported integer bitwidths, based on the memref
/// type. Returns std::nullopt on failure.
static Optional<spirv::Scope> getAtomicOpScope(MemRefType type) {
static std::optional<spirv::Scope> getAtomicOpScope(MemRefType type) {
auto sc = type.getMemorySpace().dyn_cast_or_null<spirv::StorageClassAttr>();
switch (sc.getValue()) {
case spirv::StorageClass::StorageBuffer:
Expand Down Expand Up @@ -530,7 +530,7 @@ IntStoreOpPattern::matchAndRewrite(memref::StoreOp storeOp, OpAdaptor adaptor,
storeVal = shiftValue(loc, storeVal, offset, mask, dstBits, rewriter);
Value adjustedPtr = adjustAccessChainForBitwidth(typeConverter, accessChainOp,
srcBits, dstBits, rewriter);
Optional<spirv::Scope> scope = getAtomicOpScope(memrefType);
std::optional<spirv::Scope> scope = getAtomicOpScope(memrefType);
if (!scope)
return failure();
Value result = rewriter.create<spirv::AtomicAndOp>(
Expand Down
5 changes: 3 additions & 2 deletions mlir/lib/Conversion/SCFToGPU/SCFToGPU.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,8 @@ namespace {
// Helper structure that holds common state of the loop to GPU kernel
// conversion.
struct AffineLoopToGpuConverter {
Optional<AffineForOp> collectBounds(AffineForOp forOp, unsigned numLoops);
std::optional<AffineForOp> collectBounds(AffineForOp forOp,
unsigned numLoops);

void createLaunch(AffineForOp rootForOp, AffineForOp innermostForOp,
unsigned numBlockDims, unsigned numThreadDims);
Expand Down Expand Up @@ -181,7 +182,7 @@ static bool isConstantOne(Value value) {
// This may fail if the IR for computing loop bounds cannot be constructed, for
// example if an affine loop uses semi-affine maps. Return the last loop to be
// mapped on success, std::nullopt on failure.
Optional<AffineForOp>
std::optional<AffineForOp>
AffineLoopToGpuConverter::collectBounds(AffineForOp forOp, unsigned numLoops) {
OpBuilder builder(forOp.getOperation());
dims.reserve(numLoops);
Expand Down
8 changes: 4 additions & 4 deletions mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ struct VectorToSCFPattern : public OpRewritePattern<OpTy> {
/// memref should be unpacked in the next application of TransferOpConversion.
/// A return value of std::nullopt indicates a broadcast.
template <typename OpTy>
static Optional<int64_t> unpackedDim(OpTy xferOp) {
static std::optional<int64_t> unpackedDim(OpTy xferOp) {
// TODO: support 0-d corner case.
assert(xferOp.getTransferRank() > 0 && "unexpected 0-d transfer");
auto map = xferOp.getPermutationMap();
Expand Down Expand Up @@ -159,7 +159,7 @@ static Value generateMaskCheck(OpBuilder &b, OpTy xferOp, Value iv) {
/// `resultTypes`.
template <typename OpTy>
static Value generateInBoundsCheck(
OpBuilder &b, OpTy xferOp, Value iv, Optional<int64_t> dim,
OpBuilder &b, OpTy xferOp, Value iv, std::optional<int64_t> dim,
TypeRange resultTypes,
function_ref<Value(OpBuilder &, Location)> inBoundsCase,
function_ref<Value(OpBuilder &, Location)> outOfBoundsCase = nullptr) {
Expand Down Expand Up @@ -217,7 +217,7 @@ static Value generateInBoundsCheck(
/// a return value. Consequently, this function does not have a return value.
template <typename OpTy>
static void generateInBoundsCheck(
OpBuilder &b, OpTy xferOp, Value iv, Optional<int64_t> dim,
OpBuilder &b, OpTy xferOp, Value iv, std::optional<int64_t> dim,
function_ref<void(OpBuilder &, Location)> inBoundsCase,
function_ref<void(OpBuilder &, Location)> outOfBoundsCase = nullptr) {
generateInBoundsCheck(
Expand Down Expand Up @@ -1093,7 +1093,7 @@ namespace lowering_1_d {
/// the transfer is operating. A return value of std::nullopt indicates a
/// broadcast.
template <typename OpTy>
static Optional<int64_t>
static std::optional<int64_t>
get1dMemrefIndices(OpBuilder &b, OpTy xferOp, Value iv,
SmallVector<Value, 8> &memrefIndices) {
auto indices = xferOp.getIndices();
Expand Down
6 changes: 3 additions & 3 deletions mlir/lib/Dialect/AMDGPU/IR/AMDGPUDialect.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ LogicalResult RawBufferAtomicFaddOp::verify() {
return verifyRawBufferOp(*this);
}

static Optional<uint32_t> getConstantUint32(Value v) {
static std::optional<uint32_t> getConstantUint32(Value v) {
APInt cst;
if (!v.getType().isInteger(32))
return std::nullopt;
Expand All @@ -90,7 +90,7 @@ static bool staticallyOutOfBounds(OpType op) {
return false;
int64_t result = offset + op.getIndexOffset().value_or(0);
if (op.getSgprOffset()) {
Optional<uint32_t> sgprOffset = getConstantUint32(op.getSgprOffset());
std::optional<uint32_t> sgprOffset = getConstantUint32(op.getSgprOffset());
if (!sgprOffset)
return false;
result += *sgprOffset;
Expand All @@ -101,7 +101,7 @@ static bool staticallyOutOfBounds(OpType op) {
for (auto pair : llvm::zip(strides, op.getIndices())) {
int64_t stride = std::get<0>(pair);
Value idx = std::get<1>(pair);
Optional<uint32_t> idxVal = getConstantUint32(idx);
std::optional<uint32_t> idxVal = getConstantUint32(idx);
if (!idxVal)
return false;
indexVal += stride * *idxVal;
Expand Down
6 changes: 3 additions & 3 deletions mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -51,8 +51,8 @@ static Value getSupportedReduction(AffineForOp forOp, unsigned pos,
return nullptr;

Operation *combinerOp = combinerOps.back();
Optional<arith::AtomicRMWKind> maybeKind =
TypeSwitch<Operation *, Optional<arith::AtomicRMWKind>>(combinerOp)
std::optional<arith::AtomicRMWKind> maybeKind =
TypeSwitch<Operation *, std::optional<arith::AtomicRMWKind>>(combinerOp)
.Case([](arith::AddFOp) { return arith::AtomicRMWKind::addf; })
.Case([](arith::MulFOp) { return arith::AtomicRMWKind::mulf; })
.Case([](arith::AddIOp) { return arith::AtomicRMWKind::addi; })
Expand All @@ -65,7 +65,7 @@ static Value getSupportedReduction(AffineForOp forOp, unsigned pos,
.Case([](arith::MaxSIOp) { return arith::AtomicRMWKind::maxs; })
.Case([](arith::MinUIOp) { return arith::AtomicRMWKind::minu; })
.Case([](arith::MaxUIOp) { return arith::AtomicRMWKind::maxu; })
.Default([](Operation *) -> Optional<arith::AtomicRMWKind> {
.Default([](Operation *) -> std::optional<arith::AtomicRMWKind> {
// TODO: AtomicRMW supports other kinds of reductions this is
// currently not detecting, add those when the need arises.
return std::nullopt;
Expand Down
26 changes: 13 additions & 13 deletions mlir/lib/Dialect/Affine/Analysis/AffineStructures.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -258,7 +258,7 @@ void FlatAffineValueConstraints::reset(
unsigned newNumLocals, ArrayRef<Value> valArgs) {
assert(newNumReservedCols >= newNumDims + newNumSymbols + newNumLocals + 1 &&
"minimum 1 column");
SmallVector<Optional<Value>, 8> newVals;
SmallVector<std::optional<Value>, 8> newVals;
if (!valArgs.empty())
newVals.assign(valArgs.begin(), valArgs.end());

Expand Down Expand Up @@ -318,15 +318,15 @@ unsigned FlatAffineValueConstraints::insertVar(VarKind kind, unsigned pos,
// If a Value is provided, insert it; otherwise use None.
for (unsigned i = 0; i < num; ++i)
values.insert(values.begin() + absolutePos + i,
vals[i] ? Optional<Value>(vals[i]) : std::nullopt);
vals[i] ? std::optional<Value>(vals[i]) : std::nullopt);

assert(values.size() == getNumDimAndSymbolVars());
return absolutePos;
}

bool FlatAffineValueConstraints::hasValues() const {
return llvm::any_of(
values, [](const Optional<Value> &var) { return var.has_value(); });
values, [](const std::optional<Value> &var) { return var.has_value(); });
}

/// Checks if two constraint systems are in the same space, i.e., if they are
Expand Down Expand Up @@ -359,9 +359,9 @@ static bool LLVM_ATTRIBUTE_UNUSED areVarsUnique(
return true;

SmallPtrSet<Value, 8> uniqueVars;
ArrayRef<Optional<Value>> maybeValues =
ArrayRef<std::optional<Value>> maybeValues =
cst.getMaybeValues().slice(start, end - start);
for (Optional<Value> val : maybeValues) {
for (std::optional<Value> val : maybeValues) {
if (val && !uniqueVars.insert(*val).second)
return false;
}
Expand Down Expand Up @@ -403,13 +403,13 @@ static void mergeAndAlignVars(unsigned offset, FlatAffineValueConstraints *a,
assert(areVarsUnique(*a) && "A's values aren't unique");
assert(areVarsUnique(*b) && "B's values aren't unique");

assert(
llvm::all_of(llvm::drop_begin(a->getMaybeValues(), offset),
[](const Optional<Value> &var) { return var.has_value(); }));
assert(llvm::all_of(
llvm::drop_begin(a->getMaybeValues(), offset),
[](const std::optional<Value> &var) { return var.has_value(); }));

assert(
llvm::all_of(llvm::drop_begin(b->getMaybeValues(), offset),
[](const Optional<Value> &var) { return var.has_value(); }));
assert(llvm::all_of(
llvm::drop_begin(b->getMaybeValues(), offset),
[](const std::optional<Value> &var) { return var.has_value(); }));

SmallVector<Value, 4> aDimValues;
a->getValues(offset, a->getNumDimVars(), &aDimValues);
Expand Down Expand Up @@ -1370,7 +1370,7 @@ bool FlatAffineValueConstraints::findVar(Value val, unsigned *pos) const {
}

bool FlatAffineValueConstraints::containsVar(Value val) const {
return llvm::any_of(values, [&](const Optional<Value> &mayBeVar) {
return llvm::any_of(values, [&](const std::optional<Value> &mayBeVar) {
return mayBeVar && *mayBeVar == val;
});
}
Expand Down Expand Up @@ -1431,7 +1431,7 @@ void FlatAffineValueConstraints::clearAndCopyFrom(

void FlatAffineValueConstraints::fourierMotzkinEliminate(
unsigned pos, bool darkShadow, bool *isResultIntegerExact) {
SmallVector<Optional<Value>, 8> newVals = values;
SmallVector<std::optional<Value>, 8> newVals = values;
if (getVarKindAt(pos) != VarKind::Local)
newVals.erase(newVals.begin() + pos);
// Note: Base implementation discards all associated Values.
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/Affine/Analysis/LoopAnalysis.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ void mlir::getTripCountMapAndOperands(
/// otherwise. This method uses affine expression analysis (in turn using
/// getTripCount) and is able to determine constant trip count in non-trivial
/// cases.
Optional<uint64_t> mlir::getConstantTripCount(AffineForOp forOp) {
std::optional<uint64_t> mlir::getConstantTripCount(AffineForOp forOp) {
SmallVector<Value, 4> operands;
AffineMap map;
getTripCountMapAndOperands(forOp, &map, &operands);
Expand All @@ -92,7 +92,7 @@ Optional<uint64_t> mlir::getConstantTripCount(AffineForOp forOp) {
return std::nullopt;

// Take the min if all trip counts are constant.
Optional<uint64_t> tripCount;
std::optional<uint64_t> tripCount;
for (auto resultExpr : map.getResults()) {
if (auto constExpr = resultExpr.dyn_cast<AffineConstantExpr>()) {
if (tripCount.has_value())
Expand Down
54 changes: 28 additions & 26 deletions mlir/lib/Dialect/Affine/Analysis/Utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -147,7 +147,7 @@ void ComputationSliceState::dump() const {
/// and the dst loops for those dimensions have the same bounds. Returns false
/// if both the src and the dst loops don't have the same bounds. Returns
/// std::nullopt if none of the above can be proven.
Optional<bool> ComputationSliceState::isSliceMaximalFastCheck() const {
std::optional<bool> ComputationSliceState::isSliceMaximalFastCheck() const {
assert(lbs.size() == ubs.size() && !lbs.empty() && !ivs.empty() &&
"Unexpected number of lbs, ubs and ivs in slice");

Expand Down Expand Up @@ -215,7 +215,7 @@ Optional<bool> ComputationSliceState::isSliceMaximalFastCheck() const {
/// Returns true if it is deterministically verified that the original iteration
/// space of the slice is contained within the new iteration space that is
/// created after fusing 'this' slice into its destination.
Optional<bool> ComputationSliceState::isSliceValid() {
std::optional<bool> ComputationSliceState::isSliceValid() {
// Fast check to determine if the slice is valid. If the following conditions
// are verified to be true, slice is declared valid by the fast check:
// 1. Each slice loop is a single iteration loop bound in terms of a single
Expand All @@ -226,7 +226,7 @@ Optional<bool> ComputationSliceState::isSliceValid() {
// expensive analysis.
// TODO: Store the result of the fast check, as it might be used again in
// `canRemoveSrcNodeAfterFusion`.
Optional<bool> isValidFastCheck = isSliceMaximalFastCheck();
std::optional<bool> isValidFastCheck = isSliceMaximalFastCheck();
if (isValidFastCheck && *isValidFastCheck)
return true;

Expand Down Expand Up @@ -285,10 +285,10 @@ Optional<bool> ComputationSliceState::isSliceValid() {
/// Returns true if the computation slice encloses all the iterations of the
/// sliced loop nest. Returns false if it does not. Returns std::nullopt if it
/// cannot determine if the slice is maximal or not.
Optional<bool> ComputationSliceState::isMaximal() const {
std::optional<bool> ComputationSliceState::isMaximal() const {
// Fast check to determine if the computation slice is maximal. If the result
// is inconclusive, we proceed with a more expensive analysis.
Optional<bool> isMaximalFastCheck = isSliceMaximalFastCheck();
std::optional<bool> isMaximalFastCheck = isSliceMaximalFastCheck();
if (isMaximalFastCheck)
return isMaximalFastCheck;

Expand Down Expand Up @@ -339,7 +339,7 @@ unsigned MemRefRegion::getRank() const {
return memref.getType().cast<MemRefType>().getRank();
}

Optional<int64_t> MemRefRegion::getConstantBoundingSizeAndShape(
std::optional<int64_t> MemRefRegion::getConstantBoundingSizeAndShape(
SmallVectorImpl<int64_t> *shape, std::vector<SmallVector<int64_t, 4>> *lbs,
SmallVectorImpl<int64_t> *lbDivisors) const {
auto memRefType = memref.getType().cast<MemRefType>();
Expand Down Expand Up @@ -370,7 +370,7 @@ Optional<int64_t> MemRefRegion::getConstantBoundingSizeAndShape(
int64_t lbDivisor;
for (unsigned d = 0; d < rank; d++) {
SmallVector<int64_t, 4> lb;
Optional<int64_t> diff =
std::optional<int64_t> diff =
cstWithShapeBounds.getConstantBoundOnDimSize64(d, &lb, &lbDivisor);
if (diff.has_value()) {
diffConstant = *diff;
Expand Down Expand Up @@ -611,7 +611,7 @@ static unsigned getMemRefEltSizeInBytes(MemRefType memRefType) {
}

// Returns the size of the region.
Optional<int64_t> MemRefRegion::getRegionSize() {
std::optional<int64_t> MemRefRegion::getRegionSize() {
auto memRefType = memref.getType().cast<MemRefType>();

if (!memRefType.getLayout().isIdentity()) {
Expand All @@ -626,7 +626,7 @@ Optional<int64_t> MemRefRegion::getRegionSize() {
SmallVector<Value, 4> bufIndices;

// Compute the extents of the buffer.
Optional<int64_t> numElements = getConstantBoundingSizeAndShape();
std::optional<int64_t> numElements = getConstantBoundingSizeAndShape();
if (!numElements) {
LLVM_DEBUG(llvm::dbgs() << "Dynamic shapes not yet supported\n");
return std::nullopt;
Expand All @@ -638,7 +638,7 @@ Optional<int64_t> MemRefRegion::getRegionSize() {
/// std::nullopt otherwise. If the element of the memref has vector type, takes
/// into account size of the vector as well.
// TODO: improve/complete this when we have target data.
Optional<uint64_t> mlir::getMemRefSizeInBytes(MemRefType memRefType) {
std::optional<uint64_t> mlir::getMemRefSizeInBytes(MemRefType memRefType) {
if (!memRefType.hasStaticShape())
return std::nullopt;
auto elementType = memRefType.getElementType();
Expand Down Expand Up @@ -956,7 +956,7 @@ mlir::computeSliceUnion(ArrayRef<Operation *> opsA, ArrayRef<Operation *> opsB,

// Check if the slice computed is valid. Return success only if it is verified
// that the slice is valid, otherwise return appropriate failure status.
Optional<bool> isSliceValid = sliceUnion->isSliceValid();
std::optional<bool> isSliceValid = sliceUnion->isSliceValid();
if (!isSliceValid) {
LLVM_DEBUG(llvm::dbgs() << "Cannot determine if the slice is valid\n");
return SliceComputationResult::GenericFailure;
Expand All @@ -968,7 +968,8 @@ mlir::computeSliceUnion(ArrayRef<Operation *> opsA, ArrayRef<Operation *> opsB,
}

// TODO: extend this to handle multiple result maps.
static Optional<uint64_t> getConstDifference(AffineMap lbMap, AffineMap ubMap) {
static std::optional<uint64_t> getConstDifference(AffineMap lbMap,
AffineMap ubMap) {
assert(lbMap.getNumResults() == 1 && "expected single result bound map");
assert(ubMap.getNumResults() == 1 && "expected single result bound map");
assert(lbMap.getNumDims() == ubMap.getNumDims());
Expand Down Expand Up @@ -1008,14 +1009,14 @@ bool mlir::buildSliceTripCountMap(
forOp.getConstantUpperBound() - forOp.getConstantLowerBound();
continue;
}
Optional<uint64_t> maybeConstTripCount = getConstantTripCount(forOp);
std::optional<uint64_t> maybeConstTripCount = getConstantTripCount(forOp);
if (maybeConstTripCount.has_value()) {
(*tripCountMap)[op] = *maybeConstTripCount;
continue;
}
return false;
}
Optional<uint64_t> tripCount = getConstDifference(lbMap, ubMap);
std::optional<uint64_t> tripCount = getConstDifference(lbMap, ubMap);
// Slice bounds are created with a constant ub - lb difference.
if (!tripCount.has_value())
return false;
Expand Down Expand Up @@ -1129,7 +1130,7 @@ void mlir::getComputationSliceState(
// 1. Slice is single trip count.
// 2. Loop bounds of the source and destination match.
// 3. Is being inserted at the innermost insertion point.
Optional<bool> isMaximal = sliceState->isMaximal();
std::optional<bool> isMaximal = sliceState->isMaximal();
if (isLoopParallelAndContainsReduction(getSliceLoop(i)) &&
isInnermostInsertion() && srcIsUnitSlice() && isMaximal && *isMaximal)
continue;
Expand Down Expand Up @@ -1297,10 +1298,10 @@ unsigned mlir::getNumCommonSurroundingLoops(Operation &a, Operation &b) {
return numCommonLoops;
}

static Optional<int64_t> getMemoryFootprintBytes(Block &block,
Block::iterator start,
Block::iterator end,
int memorySpace) {
static std::optional<int64_t> getMemoryFootprintBytes(Block &block,
Block::iterator start,
Block::iterator end,
int memorySpace) {
SmallDenseMap<Value, std::unique_ptr<MemRefRegion>, 4> regions;

// Walk this 'affine.for' operation to gather all memory regions.
Expand Down Expand Up @@ -1333,16 +1334,16 @@ static Optional<int64_t> getMemoryFootprintBytes(Block &block,

int64_t totalSizeInBytes = 0;
for (const auto &region : regions) {
Optional<int64_t> size = region.second->getRegionSize();
std::optional<int64_t> size = region.second->getRegionSize();
if (!size.has_value())
return std::nullopt;
totalSizeInBytes += *size;
}
return totalSizeInBytes;
}

Optional<int64_t> mlir::getMemoryFootprintBytes(AffineForOp forOp,
int memorySpace) {
std::optional<int64_t> mlir::getMemoryFootprintBytes(AffineForOp forOp,
int memorySpace) {
auto *forInst = forOp.getOperation();
return ::getMemoryFootprintBytes(
*forInst->getBlock(), Block::iterator(forInst),
Expand Down Expand Up @@ -1380,11 +1381,12 @@ IntegerSet mlir::simplifyIntegerSet(IntegerSet set) {
return simplifiedSet;
}

static void unpackOptionalValues(ArrayRef<Optional<Value>> source,
static void unpackOptionalValues(ArrayRef<std::optional<Value>> source,
SmallVector<Value> &target) {
target = llvm::to_vector<4>(llvm::map_range(source, [](Optional<Value> val) {
return val.has_value() ? *val : Value();
}));
target =
llvm::to_vector<4>(llvm::map_range(source, [](std::optional<Value> val) {
return val.has_value() ? *val : Value();
}));
}

/// Bound an identifier `pos` in a given FlatAffineValueConstraints with
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ void AffineDataCopyGeneration::runOnBlock(Block *block,

// Returns true if the footprint is known to exceed capacity.
auto exceedsCapacity = [&](AffineForOp forOp) {
Optional<int64_t> footprint =
std::optional<int64_t> footprint =
getMemoryFootprintBytes(forOp,
/*memorySpace=*/0);
return (footprint.has_value() &&
Expand Down
16 changes: 8 additions & 8 deletions mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -651,7 +651,7 @@ static bool canRemoveSrcNodeAfterFusion(
// escaping memref, we can only remove it if the fusion slice is maximal so
// that all the dependences are preserved.
if (hasOutDepsAfterFusion || !escapingMemRefs.empty()) {
Optional<bool> isMaximal = fusionSlice.isMaximal();
std::optional<bool> isMaximal = fusionSlice.isMaximal();
if (!isMaximal) {
LLVM_DEBUG(llvm::dbgs() << "Src loop can't be removed: can't determine "
"if fusion is maximal\n");
Expand Down Expand Up @@ -926,7 +926,7 @@ static unsigned getMemRefEltSizeInBytes(MemRefType memRefType) {
// this one.
static Value createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst,
unsigned dstLoopDepth,
Optional<unsigned> fastMemorySpace,
std::optional<unsigned> fastMemorySpace,
uint64_t localBufSizeThreshold) {
Operation *forInst = forOp.getOperation();

Expand All @@ -950,7 +950,7 @@ static Value createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst,
lbs.reserve(rank);
// Query 'region' for 'newShape' and lower bounds of MemRefRegion accessed
// by 'srcStoreOpInst' at depth 'dstLoopDepth'.
Optional<int64_t> numElements =
std::optional<int64_t> numElements =
region.getConstantBoundingSizeAndShape(&newShape, &lbs, &lbDivisors);
assert(numElements && "non-constant number of elts in local buffer");

Expand Down Expand Up @@ -1176,7 +1176,7 @@ static bool isFusionProfitable(Operation *srcOpInst, Operation *srcStoreOpInst,
return false;
}

Optional<int64_t> maybeSrcWriteRegionSizeBytes =
std::optional<int64_t> maybeSrcWriteRegionSizeBytes =
srcWriteRegion.getRegionSize();
if (!maybeSrcWriteRegionSizeBytes.has_value())
return false;
Expand Down Expand Up @@ -1218,7 +1218,7 @@ static bool isFusionProfitable(Operation *srcOpInst, Operation *srcStoreOpInst,
continue;
}

Optional<int64_t> maybeSliceWriteRegionSizeBytes =
std::optional<int64_t> maybeSliceWriteRegionSizeBytes =
sliceWriteRegion.getRegionSize();
if (!maybeSliceWriteRegionSizeBytes.has_value() ||
*maybeSliceWriteRegionSizeBytes == 0) {
Expand Down Expand Up @@ -1398,7 +1398,7 @@ struct GreedyFusion {
// Parameter for local buffer size threshold.
unsigned localBufSizeThreshold;
// Parameter for fast memory space.
Optional<unsigned> fastMemorySpace;
std::optional<unsigned> fastMemorySpace;
// If true, ignore any additional (redundant) computation tolerance threshold
// that would have prevented fusion.
bool maximalFusion;
Expand All @@ -1409,7 +1409,7 @@ struct GreedyFusion {
using Node = MemRefDependenceGraph::Node;

GreedyFusion(MemRefDependenceGraph *mdg, unsigned localBufSizeThreshold,
Optional<unsigned> fastMemorySpace, bool maximalFusion,
std::optional<unsigned> fastMemorySpace, bool maximalFusion,
double computeToleranceThreshold)
: mdg(mdg), localBufSizeThreshold(localBufSizeThreshold),
fastMemorySpace(fastMemorySpace), maximalFusion(maximalFusion),
Expand Down Expand Up @@ -2016,7 +2016,7 @@ void LoopFusion::runOnBlock(Block *block) {
if (!g.init(block))
return;

Optional<unsigned> fastMemorySpaceOpt;
std::optional<unsigned> fastMemorySpaceOpt;
if (fastMemorySpace.hasValue())
fastMemorySpaceOpt = fastMemorySpace;
unsigned localBufSizeThresholdBytes = localBufSizeThreshold * 1024;
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ static void adjustToDivisorsOfTripCounts(ArrayRef<AffineForOp> band,
assert(band.size() == tileSizes->size() && "invalid tile size count");
for (unsigned i = 0, e = band.size(); i < e; i++) {
unsigned &tSizeAdjusted = (*tileSizes)[i];
Optional<uint64_t> mayConst = getConstantTripCount(band[i]);
std::optional<uint64_t> mayConst = getConstantTripCount(band[i]);
if (!mayConst)
continue;
// Adjust the tile size to largest factor of the trip count less than
Expand Down Expand Up @@ -122,7 +122,7 @@ void LoopTiling::getTileSizes(ArrayRef<AffineForOp> band,
// the cache size. This is an approximation with the assumption that the
// footprint increases with the tile size linearly in that dimension (i.e.,
// assumes one-to-one access function).
Optional<int64_t> fp = getMemoryFootprintBytes(band[0], 0);
std::optional<int64_t> fp = getMemoryFootprintBytes(band[0], 0);
if (!fp) {
// Fill with default tile sizes if footprint is unknown.
std::fill(tileSizes->begin(), tileSizes->end(),
Expand Down
6 changes: 3 additions & 3 deletions mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ struct LoopUnroll : public impl::AffineLoopUnrollBase<LoopUnroll> {

= default;
explicit LoopUnroll(
Optional<unsigned> unrollFactor = std::nullopt,
std::optional<unsigned> unrollFactor = std::nullopt,
bool unrollUpToFactor = false, bool unrollFull = false,
const std::function<unsigned(AffineForOp)> &getUnrollFactor = nullptr)
: getUnrollFactor(getUnrollFactor) {
Expand Down Expand Up @@ -100,7 +100,7 @@ void LoopUnroll::runOnOperation() {
// so that loops are gathered from innermost to outermost (or else unrolling
// an outer one may delete gathered inner ones).
getOperation().walk([&](AffineForOp forOp) {
Optional<uint64_t> tripCount = getConstantTripCount(forOp);
std::optional<uint64_t> tripCount = getConstantTripCount(forOp);
if (tripCount && *tripCount <= unrollFullThreshold)
loops.push_back(forOp);
});
Expand Down Expand Up @@ -146,6 +146,6 @@ std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLoopUnrollPass(
int unrollFactor, bool unrollUpToFactor, bool unrollFull,
const std::function<unsigned(AffineForOp)> &getUnrollFactor) {
return std::make_unique<LoopUnroll>(
unrollFactor == -1 ? std::nullopt : Optional<unsigned>(unrollFactor),
unrollFactor == -1 ? std::nullopt : std::optional<unsigned>(unrollFactor),
unrollUpToFactor, unrollFull, getUnrollFactor);
}
5 changes: 3 additions & 2 deletions mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,8 @@ namespace {
/// outer loop in a Function.
struct LoopUnrollAndJam
: public impl::AffineLoopUnrollAndJamBase<LoopUnrollAndJam> {
explicit LoopUnrollAndJam(Optional<unsigned> unrollJamFactor = std::nullopt) {
explicit LoopUnrollAndJam(
std::optional<unsigned> unrollJamFactor = std::nullopt) {
if (unrollJamFactor)
this->unrollJamFactor = *unrollJamFactor;
}
Expand All @@ -75,7 +76,7 @@ std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLoopUnrollAndJamPass(int unrollJamFactor) {
return std::make_unique<LoopUnrollAndJam>(
unrollJamFactor == -1 ? std::nullopt
: Optional<unsigned>(unrollJamFactor));
: std::optional<unsigned>(unrollJamFactor));
}

void LoopUnrollAndJam::runOnOperation() {
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -582,7 +582,7 @@ isVectorizableLoopPtrFactory(const DenseSet<Operation *> &parallelLoops,
/// Up to 3-D patterns are supported.
/// If the command line argument requests a pattern of higher order, returns an
/// empty pattern list which will conservatively result in no vectorization.
static Optional<NestedPattern>
static std::optional<NestedPattern>
makePattern(const DenseSet<Operation *> &parallelLoops, int vectorRank,
ArrayRef<int64_t> fastestVaryingPattern) {
using matcher::For;
Expand Down Expand Up @@ -1666,7 +1666,7 @@ static void vectorizeLoops(Operation *parentOp, DenseSet<Operation *> &loops,
"Vectorizing reductions is supported only for 1-D vectors");

// Compute 1-D, 2-D or 3-D loop pattern to be matched on the target loops.
Optional<NestedPattern> pattern =
std::optional<NestedPattern> pattern =
makePattern(loops, vectorSizes.size(), fastestVaryingPattern);
if (!pattern) {
LLVM_DEBUG(dbgs() << "\n[early-vect] pattern couldn't be computed\n");
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -358,7 +358,7 @@ FusionResult mlir::canFuseLoops(AffineForOp srcForOp, AffineForOp dstForOp,
LogicalResult promoteSingleIterReductionLoop(AffineForOp forOp,
bool siblingFusionUser) {
// Check if the reduction loop is a single iteration loop.
Optional<uint64_t> tripCount = getConstantTripCount(forOp);
std::optional<uint64_t> tripCount = getConstantTripCount(forOp);
if (!tripCount || *tripCount != 1)
return failure();
auto iterOperands = forOp.getIterOperands();
Expand Down Expand Up @@ -491,7 +491,7 @@ bool mlir::getLoopNestStats(AffineForOp forOpRoot, LoopNestStats *stats) {

// Record trip count for 'forOp'. Set flag if trip count is not
// constant.
Optional<uint64_t> maybeConstTripCount = getConstantTripCount(forOp);
std::optional<uint64_t> maybeConstTripCount = getConstantTripCount(forOp);
if (!maybeConstTripCount) {
// Currently only constant trip count loop nests are supported.
LLVM_DEBUG(llvm::dbgs() << "Non-constant trip count unsupported\n");
Expand Down
21 changes: 11 additions & 10 deletions mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,7 @@ static void replaceIterArgsAndYieldResults(AffineForOp forOp) {
/// was known to have a single iteration.
// TODO: extend this for arbitrary affine bounds.
LogicalResult mlir::promoteIfSingleIteration(AffineForOp forOp) {
Optional<uint64_t> tripCount = getConstantTripCount(forOp);
std::optional<uint64_t> tripCount = getConstantTripCount(forOp);
if (!tripCount || *tripCount != 1)
return failure();

Expand Down Expand Up @@ -791,7 +791,8 @@ constructTiledIndexSetHyperRect(MutableArrayRef<AffineForOp> origLoops,
// Bounds for intra-tile loops.
for (unsigned i = 0; i < width; i++) {
int64_t largestDiv = getLargestDivisorOfTripCount(origLoops[i]);
Optional<uint64_t> mayBeConstantCount = getConstantTripCount(origLoops[i]);
std::optional<uint64_t> mayBeConstantCount =
getConstantTripCount(origLoops[i]);
// The lower bound is just the tile-space loop.
AffineMap lbMap = b.getDimIdentityMap();
newLoops[width + i].setLowerBound(
Expand Down Expand Up @@ -971,7 +972,7 @@ void mlir::getTileableBands(func::FuncOp f,

/// Unrolls this loop completely.
LogicalResult mlir::loopUnrollFull(AffineForOp forOp) {
Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
std::optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
if (mayBeConstantTripCount.has_value()) {
uint64_t tripCount = *mayBeConstantTripCount;
if (tripCount == 0)
Expand All @@ -987,7 +988,7 @@ LogicalResult mlir::loopUnrollFull(AffineForOp forOp) {
/// whichever is lower.
LogicalResult mlir::loopUnrollUpToFactor(AffineForOp forOp,
uint64_t unrollFactor) {
Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
std::optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
if (mayBeConstantTripCount.has_value() &&
*mayBeConstantTripCount < unrollFactor)
return loopUnrollByFactor(forOp, *mayBeConstantTripCount);
Expand Down Expand Up @@ -1093,7 +1094,7 @@ LogicalResult mlir::loopUnrollByFactor(
bool cleanUpUnroll) {
assert(unrollFactor > 0 && "unroll factor should be positive");

Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
std::optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
if (unrollFactor == 1) {
if (mayBeConstantTripCount && *mayBeConstantTripCount == 1 &&
failed(promoteIfSingleIteration(forOp)))
Expand Down Expand Up @@ -1156,7 +1157,7 @@ LogicalResult mlir::loopUnrollByFactor(

LogicalResult mlir::loopUnrollJamUpToFactor(AffineForOp forOp,
uint64_t unrollJamFactor) {
Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
std::optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
if (mayBeConstantTripCount.has_value() &&
*mayBeConstantTripCount < unrollJamFactor)
return loopUnrollJamByFactor(forOp, *mayBeConstantTripCount);
Expand Down Expand Up @@ -1209,7 +1210,7 @@ LogicalResult mlir::loopUnrollJamByFactor(AffineForOp forOp,
uint64_t unrollJamFactor) {
assert(unrollJamFactor > 0 && "unroll jam factor should be positive");

Optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
std::optional<uint64_t> mayBeConstantTripCount = getConstantTripCount(forOp);
if (unrollJamFactor == 1) {
if (mayBeConstantTripCount && *mayBeConstantTripCount == 1 &&
failed(promoteIfSingleIteration(forOp)))
Expand Down Expand Up @@ -2095,7 +2096,7 @@ static LogicalResult generateCopy(
std::vector<SmallVector<int64_t, 4>> lbs;
SmallVector<int64_t, 8> lbDivisors;
lbs.reserve(rank);
Optional<int64_t> numElements = region.getConstantBoundingSizeAndShape(
std::optional<int64_t> numElements = region.getConstantBoundingSizeAndShape(
&fastBufferShape, &lbs, &lbDivisors);
if (!numElements) {
LLVM_DEBUG(llvm::dbgs() << "Non-constant region size not supported\n");
Expand Down Expand Up @@ -2376,7 +2377,7 @@ static bool getFullMemRefAsRegion(Operation *op, unsigned numParamLoopIVs,
LogicalResult mlir::affineDataCopyGenerate(Block::iterator begin,
Block::iterator end,
const AffineCopyOptions &copyOptions,
Optional<Value> filterMemRef,
std::optional<Value> filterMemRef,
DenseSet<Operation *> &copyNests) {
if (begin == end)
return success();
Expand Down Expand Up @@ -2565,7 +2566,7 @@ LogicalResult mlir::affineDataCopyGenerate(Block::iterator begin,
// an AffineForOp.
LogicalResult mlir::affineDataCopyGenerate(AffineForOp forOp,
const AffineCopyOptions &copyOptions,
Optional<Value> filterMemRef,
std::optional<Value> filterMemRef,
DenseSet<Operation *> &copyNests) {
return affineDataCopyGenerate(forOp.getBody()->begin(),
std::prev(forOp.getBody()->end()), copyOptions,
Expand Down
9 changes: 4 additions & 5 deletions mlir/lib/Dialect/Affine/Utils/Utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -217,10 +217,9 @@ mlir::Value mlir::expandAffineExpr(OpBuilder &builder, Location loc,

/// Create a sequence of operations that implement the `affineMap` applied to
/// the given `operands` (as it it were an AffineApplyOp).
Optional<SmallVector<Value, 8>> mlir::expandAffineMap(OpBuilder &builder,
Location loc,
AffineMap affineMap,
ValueRange operands) {
std::optional<SmallVector<Value, 8>>
mlir::expandAffineMap(OpBuilder &builder, Location loc, AffineMap affineMap,
ValueRange operands) {
auto numDims = affineMap.getNumDims();
auto expanded = llvm::to_vector<8>(
llvm::map_range(affineMap.getResults(),
Expand Down Expand Up @@ -1817,7 +1816,7 @@ MemRefType mlir::normalizeMemRefType(MemRefType memrefType,
newShape[d] = ShapedType::kDynamic;
} else {
// The lower bound for the shape is always zero.
Optional<int64_t> ubConst =
std::optional<int64_t> ubConst =
fac.getConstantBound64(IntegerPolyhedron::UB, d);
// For a static memref and an affine map with no symbols, this is
// always bounded. However, when we have symbols, we may not be able to
Expand Down
96 changes: 55 additions & 41 deletions mlir/lib/Dialect/Arith/IR/InferIntRangeInterfaceImpls.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ using namespace mlir::arith;
/// Function that evaluates the result of doing something on arithmetic
/// constants and returns std::nullopt on overflow.
using ConstArithFn =
function_ref<Optional<APInt>(const APInt &, const APInt &)>;
function_ref<std::optional<APInt>(const APInt &, const APInt &)>;

/// Return the maxmially wide signed or unsigned range for a given bitwidth.

Expand All @@ -30,8 +30,8 @@ static ConstantIntRanges computeBoundsBy(ConstArithFn op, const APInt &minLeft,
const APInt &minRight,
const APInt &maxLeft,
const APInt &maxRight, bool isSigned) {
Optional<APInt> maybeMin = op(minLeft, minRight);
Optional<APInt> maybeMax = op(maxLeft, maxRight);
std::optional<APInt> maybeMin = op(minLeft, minRight);
std::optional<APInt> maybeMax = op(maxLeft, maxRight);
if (maybeMin && maybeMax)
return ConstantIntRanges::range(*maybeMin, *maybeMax, isSigned);
return ConstantIntRanges::maxRange(minLeft.getBitWidth());
Expand All @@ -48,7 +48,7 @@ static ConstantIntRanges minMaxBy(ConstArithFn op, ArrayRef<APInt> lhs,
isSigned ? APInt::getSignedMinValue(width) : APInt::getZero(width);
for (const APInt &left : lhs) {
for (const APInt &right : rhs) {
Optional<APInt> maybeThisResult = op(left, right);
std::optional<APInt> maybeThisResult = op(left, right);
if (!maybeThisResult)
return ConstantIntRanges::maxRange(width);
APInt result = std::move(*maybeThisResult);
Expand Down Expand Up @@ -79,15 +79,17 @@ void arith::ConstantOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
void arith::AddIOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
SetIntRangeFn setResultRange) {
const ConstantIntRanges &lhs = argRanges[0], &rhs = argRanges[1];
ConstArithFn uadd = [](const APInt &a, const APInt &b) -> Optional<APInt> {
ConstArithFn uadd = [](const APInt &a,
const APInt &b) -> std::optional<APInt> {
bool overflowed = false;
APInt result = a.uadd_ov(b, overflowed);
return overflowed ? Optional<APInt>() : result;
return overflowed ? std::optional<APInt>() : result;
};
ConstArithFn sadd = [](const APInt &a, const APInt &b) -> Optional<APInt> {
ConstArithFn sadd = [](const APInt &a,
const APInt &b) -> std::optional<APInt> {
bool overflowed = false;
APInt result = a.sadd_ov(b, overflowed);
return overflowed ? Optional<APInt>() : result;
return overflowed ? std::optional<APInt>() : result;
};

ConstantIntRanges urange = computeBoundsBy(
Expand All @@ -105,15 +107,17 @@ void arith::SubIOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
SetIntRangeFn setResultRange) {
const ConstantIntRanges &lhs = argRanges[0], &rhs = argRanges[1];

ConstArithFn usub = [](const APInt &a, const APInt &b) -> Optional<APInt> {
ConstArithFn usub = [](const APInt &a,
const APInt &b) -> std::optional<APInt> {
bool overflowed = false;
APInt result = a.usub_ov(b, overflowed);
return overflowed ? Optional<APInt>() : result;
return overflowed ? std::optional<APInt>() : result;
};
ConstArithFn ssub = [](const APInt &a, const APInt &b) -> Optional<APInt> {
ConstArithFn ssub = [](const APInt &a,
const APInt &b) -> std::optional<APInt> {
bool overflowed = false;
APInt result = a.ssub_ov(b, overflowed);
return overflowed ? Optional<APInt>() : result;
return overflowed ? std::optional<APInt>() : result;
};
ConstantIntRanges urange = computeBoundsBy(
usub, lhs.umin(), rhs.umax(), lhs.umax(), rhs.umin(), /*isSigned=*/false);
Expand All @@ -130,15 +134,17 @@ void arith::MulIOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
SetIntRangeFn setResultRange) {
const ConstantIntRanges &lhs = argRanges[0], &rhs = argRanges[1];

ConstArithFn umul = [](const APInt &a, const APInt &b) -> Optional<APInt> {
ConstArithFn umul = [](const APInt &a,
const APInt &b) -> std::optional<APInt> {
bool overflowed = false;
APInt result = a.umul_ov(b, overflowed);
return overflowed ? Optional<APInt>() : result;
return overflowed ? std::optional<APInt>() : result;
};
ConstArithFn smul = [](const APInt &a, const APInt &b) -> Optional<APInt> {
ConstArithFn smul = [](const APInt &a,
const APInt &b) -> std::optional<APInt> {
bool overflowed = false;
APInt result = a.smul_ov(b, overflowed);
return overflowed ? Optional<APInt>() : result;
return overflowed ? std::optional<APInt>() : result;
};

ConstantIntRanges urange =
Expand All @@ -157,7 +163,7 @@ void arith::MulIOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,

/// Fix up division results (ex. for ceiling and floor), returning an APInt
/// if there has been no overflow
using DivisionFixupFn = function_ref<Optional<APInt>(
using DivisionFixupFn = function_ref<std::optional<APInt>(
const APInt &lhs, const APInt &rhs, const APInt &result)>;

static ConstantIntRanges inferDivUIRange(const ConstantIntRanges &lhs,
Expand All @@ -167,7 +173,8 @@ static ConstantIntRanges inferDivUIRange(const ConstantIntRanges &lhs,
&rhsMax = rhs.umax();

if (!rhsMin.isZero()) {
auto udiv = [&fixup](const APInt &a, const APInt &b) -> Optional<APInt> {
auto udiv = [&fixup](const APInt &a,
const APInt &b) -> std::optional<APInt> {
return fixup(a, b, a.udiv(b));
};
return minMaxBy(udiv, {lhsMin, lhsMax}, {rhsMin, rhsMax},
Expand Down Expand Up @@ -197,10 +204,11 @@ static ConstantIntRanges inferDivSIRange(const ConstantIntRanges &lhs,
bool canDivide = rhsMin.isStrictlyPositive() || rhsMax.isNegative();

if (canDivide) {
auto sdiv = [&fixup](const APInt &a, const APInt &b) -> Optional<APInt> {
auto sdiv = [&fixup](const APInt &a,
const APInt &b) -> std::optional<APInt> {
bool overflowed = false;
APInt result = a.sdiv_ov(b, overflowed);
return overflowed ? Optional<APInt>() : fixup(a, b, result);
return overflowed ? std::optional<APInt>() : fixup(a, b, result);
};
return minMaxBy(sdiv, {lhsMin, lhsMax}, {rhsMin, rhsMax},
/*isSigned=*/true);
Expand All @@ -224,13 +232,14 @@ void arith::CeilDivUIOp::inferResultRanges(
ArrayRef<ConstantIntRanges> argRanges, SetIntRangeFn setResultRange) {
const ConstantIntRanges &lhs = argRanges[0], &rhs = argRanges[1];

DivisionFixupFn ceilDivUIFix = [](const APInt &lhs, const APInt &rhs,
const APInt &result) -> Optional<APInt> {
DivisionFixupFn ceilDivUIFix =
[](const APInt &lhs, const APInt &rhs,
const APInt &result) -> std::optional<APInt> {
if (!lhs.urem(rhs).isZero()) {
bool overflowed = false;
APInt corrected =
result.uadd_ov(APInt(result.getBitWidth(), 1), overflowed);
return overflowed ? Optional<APInt>() : corrected;
return overflowed ? std::optional<APInt>() : corrected;
}
return result;
};
Expand All @@ -245,13 +254,14 @@ void arith::CeilDivSIOp::inferResultRanges(
ArrayRef<ConstantIntRanges> argRanges, SetIntRangeFn setResultRange) {
const ConstantIntRanges &lhs = argRanges[0], &rhs = argRanges[1];

DivisionFixupFn ceilDivSIFix = [](const APInt &lhs, const APInt &rhs,
const APInt &result) -> Optional<APInt> {
DivisionFixupFn ceilDivSIFix =
[](const APInt &lhs, const APInt &rhs,
const APInt &result) -> std::optional<APInt> {
if (!lhs.srem(rhs).isZero() && lhs.isNonNegative() == rhs.isNonNegative()) {
bool overflowed = false;
APInt corrected =
result.sadd_ov(APInt(result.getBitWidth(), 1), overflowed);
return overflowed ? Optional<APInt>() : corrected;
return overflowed ? std::optional<APInt>() : corrected;
}
return result;
};
Expand All @@ -266,13 +276,14 @@ void arith::FloorDivSIOp::inferResultRanges(
ArrayRef<ConstantIntRanges> argRanges, SetIntRangeFn setResultRange) {
const ConstantIntRanges &lhs = argRanges[0], &rhs = argRanges[1];

DivisionFixupFn floorDivSIFix = [](const APInt &lhs, const APInt &rhs,
const APInt &result) -> Optional<APInt> {
DivisionFixupFn floorDivSIFix =
[](const APInt &lhs, const APInt &rhs,
const APInt &result) -> std::optional<APInt> {
if (!lhs.srem(rhs).isZero() && lhs.isNonNegative() != rhs.isNonNegative()) {
bool overflowed = false;
APInt corrected =
result.ssub_ov(APInt(result.getBitWidth(), 1), overflowed);
return overflowed ? Optional<APInt>() : corrected;
return overflowed ? std::optional<APInt>() : corrected;
}
return result;
};
Expand Down Expand Up @@ -371,7 +382,7 @@ void arith::AndIOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
SetIntRangeFn setResultRange) {
auto [lhsZeros, lhsOnes] = widenBitwiseBounds(argRanges[0]);
auto [rhsZeros, rhsOnes] = widenBitwiseBounds(argRanges[1]);
auto andi = [](const APInt &a, const APInt &b) -> Optional<APInt> {
auto andi = [](const APInt &a, const APInt &b) -> std::optional<APInt> {
return a & b;
};
setResultRange(getResult(),
Expand All @@ -387,7 +398,7 @@ void arith::OrIOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
SetIntRangeFn setResultRange) {
auto [lhsZeros, lhsOnes] = widenBitwiseBounds(argRanges[0]);
auto [rhsZeros, rhsOnes] = widenBitwiseBounds(argRanges[1]);
auto ori = [](const APInt &a, const APInt &b) -> Optional<APInt> {
auto ori = [](const APInt &a, const APInt &b) -> std::optional<APInt> {
return a | b;
};
setResultRange(getResult(),
Expand All @@ -403,7 +414,7 @@ void arith::XOrIOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
SetIntRangeFn setResultRange) {
auto [lhsZeros, lhsOnes] = widenBitwiseBounds(argRanges[0]);
auto [rhsZeros, rhsOnes] = widenBitwiseBounds(argRanges[1]);
auto xori = [](const APInt &a, const APInt &b) -> Optional<APInt> {
auto xori = [](const APInt &a, const APInt &b) -> std::optional<APInt> {
return a ^ b;
};
setResultRange(getResult(),
Expand Down Expand Up @@ -604,8 +615,8 @@ bool isStaticallyTrue(arith::CmpIPredicate pred, const ConstantIntRanges &lhs,
case arith::CmpIPredicate::ugt:
return applyCmpPredicate(pred, lhs.umin(), rhs.umax());
case arith::CmpIPredicate::eq: {
Optional<APInt> lhsConst = lhs.getConstantValue();
Optional<APInt> rhsConst = rhs.getConstantValue();
std::optional<APInt> lhsConst = lhs.getConstantValue();
std::optional<APInt> rhsConst = rhs.getConstantValue();
return lhsConst && rhsConst && lhsConst == rhsConst;
}
case arith::CmpIPredicate::ne: {
Expand Down Expand Up @@ -644,7 +655,7 @@ void arith::CmpIOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,

void arith::SelectOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
SetIntRangeFn setResultRange) {
Optional<APInt> mbCondVal = argRanges[0].getConstantValue();
std::optional<APInt> mbCondVal = argRanges[0].getConstantValue();

if (mbCondVal) {
if (mbCondVal->isZero())
Expand All @@ -663,8 +674,9 @@ void arith::SelectOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
void arith::ShLIOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
SetIntRangeFn setResultRange) {
const ConstantIntRanges &lhs = argRanges[0], &rhs = argRanges[1];
ConstArithFn shl = [](const APInt &l, const APInt &r) -> Optional<APInt> {
return r.uge(r.getBitWidth()) ? Optional<APInt>() : l.shl(r);
ConstArithFn shl = [](const APInt &l,
const APInt &r) -> std::optional<APInt> {
return r.uge(r.getBitWidth()) ? std::optional<APInt>() : l.shl(r);
};
ConstantIntRanges urange =
minMaxBy(shl, {lhs.umin(), lhs.umax()}, {rhs.umin(), rhs.umax()},
Expand All @@ -683,8 +695,9 @@ void arith::ShRUIOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
SetIntRangeFn setResultRange) {
const ConstantIntRanges &lhs = argRanges[0], &rhs = argRanges[1];

ConstArithFn lshr = [](const APInt &l, const APInt &r) -> Optional<APInt> {
return r.uge(r.getBitWidth()) ? Optional<APInt>() : l.lshr(r);
ConstArithFn lshr = [](const APInt &l,
const APInt &r) -> std::optional<APInt> {
return r.uge(r.getBitWidth()) ? std::optional<APInt>() : l.lshr(r);
};
setResultRange(getResult(), minMaxBy(lshr, {lhs.umin(), lhs.umax()},
{rhs.umin(), rhs.umax()},
Expand All @@ -699,8 +712,9 @@ void arith::ShRSIOp::inferResultRanges(ArrayRef<ConstantIntRanges> argRanges,
SetIntRangeFn setResultRange) {
const ConstantIntRanges &lhs = argRanges[0], &rhs = argRanges[1];

ConstArithFn ashr = [](const APInt &l, const APInt &r) -> Optional<APInt> {
return r.uge(r.getBitWidth()) ? Optional<APInt>() : l.ashr(r);
ConstArithFn ashr = [](const APInt &l,
const APInt &r) -> std::optional<APInt> {
return r.uge(r.getBitWidth()) ? std::optional<APInt>() : l.ashr(r);
};

setResultRange(getResult(),
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ struct CoroMachinery {
// %0 = arith.constant ... : T
// async.yield %0 : T
// }
Optional<Value> asyncToken; // returned completion token
std::optional<Value> asyncToken; // returned completion token
llvm::SmallVector<Value, 4> returnValues; // returned async values

Value coroHandle; // coroutine handle (!async.coro.getHandle value)
Expand Down Expand Up @@ -163,7 +163,7 @@ static CoroMachinery setupCoroMachinery(func::FuncOp func) {
// async computations
bool isStateful = func.getCallableResults().front().isa<TokenType>();

Optional<Value> retToken;
std::optional<Value> retToken;
if (isStateful)
retToken.emplace(builder.create<RuntimeCreateOp>(TokenType::get(ctx)));

Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/Bufferization/IR/BufferizationOps.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -485,12 +485,12 @@ struct SimplifyClones : public OpRewritePattern<CloneOp> {
canonicalSource.getDefiningOp()))
canonicalSource = iface.getViewSource();

llvm::Optional<Operation *> maybeCloneDeallocOp =
std::optional<Operation *> maybeCloneDeallocOp =
memref::findDealloc(cloneOp.getOutput());
// Skip if either of them has > 1 deallocate operations.
if (!maybeCloneDeallocOp.has_value())
return failure();
llvm::Optional<Operation *> maybeSourceDeallocOp =
std::optional<Operation *> maybeSourceDeallocOp =
memref::findDealloc(canonicalSource);
if (!maybeSourceDeallocOp.has_value())
return failure();
Expand Down
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Bufferization/Transforms/BufferUtils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ void BufferPlacementAllocs::build(Operation *op) {
// Get allocation result.
Value allocValue = allocateResultEffects[0].getValue();
// Find the associated dealloc value and register the allocation entry.
llvm::Optional<Operation *> dealloc = memref::findDealloc(allocValue);
std::optional<Operation *> dealloc = memref::findDealloc(allocValue);
// If the allocation has > 1 dealloc associated with it, skip handling it.
if (!dealloc)
return;
Expand Down
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -277,7 +277,7 @@ struct OneShotBufferizePass
}

private:
llvm::Optional<OneShotBufferizationOptions> options;
std::optional<OneShotBufferizationOptions> options;
};
} // namespace

Expand Down
Loading