2 changes: 1 addition & 1 deletion mlir/lib/Conversion/TosaToSCF/TosaToSCFPass.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -48,5 +48,5 @@ std::unique_ptr<Pass> mlir::tosa::createTosaToSCF() {
}

void mlir::tosa::addTosaToSCFPasses(OpPassManager &pm) {
pm.addNestedPass<FuncOp>(createTosaToSCF());
pm.addNestedPass<func::FuncOp>(createTosaToSCF());
}
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Affine/Analysis/AffineAnalysis.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -326,7 +326,7 @@ static Block *getCommonBlock(const MemRefAccess &srcAccess,

if (numCommonLoops == 0) {
Block *block = srcAccess.opInst->getBlock();
while (!llvm::isa<FuncOp>(block->getParentOp())) {
while (!llvm::isa<func::FuncOp>(block->getParentOp())) {
block = block->getParentOp()->getBlock();
}
return block;
Expand Down
13 changes: 8 additions & 5 deletions mlir/lib/Dialect/Affine/Transforms/AffineDataCopyGeneration.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -77,14 +77,17 @@ struct AffineDataCopyGeneration
/// buffers in 'fastMemorySpace', and replaces memory operations to the former
/// by the latter. Only load op's handled for now.
/// TODO: extend this to store op's.
std::unique_ptr<OperationPass<FuncOp>> mlir::createAffineDataCopyGenerationPass(
unsigned slowMemorySpace, unsigned fastMemorySpace, unsigned tagMemorySpace,
int minDmaTransferSize, uint64_t fastMemCapacityBytes) {
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createAffineDataCopyGenerationPass(unsigned slowMemorySpace,
unsigned fastMemorySpace,
unsigned tagMemorySpace,
int minDmaTransferSize,
uint64_t fastMemCapacityBytes) {
return std::make_unique<AffineDataCopyGeneration>(
slowMemorySpace, fastMemorySpace, tagMemorySpace, minDmaTransferSize,
fastMemCapacityBytes);
}
std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createAffineDataCopyGenerationPass() {
return std::make_unique<AffineDataCopyGeneration>();
}
Expand Down Expand Up @@ -196,7 +199,7 @@ void AffineDataCopyGeneration::runOnBlock(Block *block,
}

void AffineDataCopyGeneration::runOnOperation() {
FuncOp f = getOperation();
func::FuncOp f = getOperation();
OpBuilder topBuilder(f.getBody());
zeroIndex = topBuilder.create<arith::ConstantIndexOp>(f.getLoc(), 0);

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -238,7 +238,7 @@ void LoopInvariantCodeMotion::runOnOperation() {
});
}

std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createAffineLoopInvariantCodeMotionPass() {
return std::make_unique<LoopInvariantCodeMotion>();
}
3 changes: 2 additions & 1 deletion mlir/lib/Dialect/Affine/Transforms/AffineLoopNormalize.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ struct AffineLoopNormalizePass

} // namespace

std::unique_ptr<OperationPass<FuncOp>> mlir::createAffineLoopNormalizePass() {
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createAffineLoopNormalizePass() {
return std::make_unique<AffineLoopNormalizePass>();
}
5 changes: 3 additions & 2 deletions mlir/lib/Dialect/Affine/Transforms/AffineParallelize.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,7 @@ struct ParallelizationCandidate {
} // namespace

void AffineParallelize::runOnOperation() {
FuncOp f = getOperation();
func::FuncOp f = getOperation();

// The walker proceeds in pre-order to process the outer loops first
// and control the number of outer parallel loops.
Expand Down Expand Up @@ -81,6 +81,7 @@ void AffineParallelize::runOnOperation() {
}
}

std::unique_ptr<OperationPass<FuncOp>> mlir::createAffineParallelizePass() {
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createAffineParallelizePass() {
return std::make_unique<AffineParallelize>();
}
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ struct AffineScalarReplacement

} // namespace

std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createAffineScalarReplacementPass() {
return std::make_unique<AffineScalarReplacement>();
}
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/Affine/Transforms/LoopCoalescing.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,7 @@ struct LoopCoalescingPass : public LoopCoalescingBase<LoopCoalescingPass> {
}

void runOnOperation() override {
FuncOp func = getOperation();
func::FuncOp func = getOperation();
func.walk([&](Operation *op) {
if (auto scfForOp = dyn_cast<scf::ForOp>(op))
walkLoop(scfForOp);
Expand All @@ -97,6 +97,6 @@ struct LoopCoalescingPass : public LoopCoalescingBase<LoopCoalescingPass> {

} // namespace

std::unique_ptr<OperationPass<FuncOp>> mlir::createLoopCoalescingPass() {
std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLoopCoalescingPass() {
return std::make_unique<LoopCoalescingPass>();
}
10 changes: 5 additions & 5 deletions mlir/lib/Dialect/Affine/Transforms/LoopFusion.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ struct LoopFusion : public AffineLoopFusionBase<LoopFusion> {

} // namespace

std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLoopFusionPass(unsigned fastMemorySpace,
uint64_t localBufSizeThreshold, bool maximalFusion,
enum FusionMode affineFusionMode) {
Expand Down Expand Up @@ -202,7 +202,7 @@ struct MemRefDependenceGraph {

// Initializes the dependence graph based on operations in 'f'.
// Returns true on success, false otherwise.
bool init(FuncOp f);
bool init(func::FuncOp f);

// Returns the graph node for 'id'.
Node *getNode(unsigned id) {
Expand Down Expand Up @@ -731,7 +731,7 @@ void gatherEscapingMemrefs(unsigned id, MemRefDependenceGraph *mdg,
// Assigns each node in the graph a node id based on program order in 'f'.
// TODO: Add support for taking a Block arg to construct the
// dependence graph at a different depth.
bool MemRefDependenceGraph::init(FuncOp f) {
bool MemRefDependenceGraph::init(func::FuncOp f) {
LLVM_DEBUG(llvm::dbgs() << "--- Initializing MDG ---\n");
DenseMap<Value, SetVector<unsigned>> memrefAccesses;

Expand Down Expand Up @@ -895,7 +895,7 @@ static Value createPrivateMemRef(AffineForOp forOp, Operation *srcStoreOpInst,
// Create builder to insert alloc op just before 'forOp'.
OpBuilder b(forInst);
// Builder to create constants at the top level.
OpBuilder top(forInst->getParentOfType<FuncOp>().getBody());
OpBuilder top(forInst->getParentOfType<func::FuncOp>().getBody());
// Create new memref type based on slice bounds.
auto oldMemRef = cast<AffineWriteOpInterface>(srcStoreOpInst).getMemRef();
auto oldMemRefType = oldMemRef.getType().cast<MemRefType>();
Expand Down Expand Up @@ -1853,7 +1853,7 @@ struct GreedyFusion {
};

// Search for siblings which load the same memref function argument.
auto fn = dstNode->op->getParentOfType<FuncOp>();
auto fn = dstNode->op->getParentOfType<func::FuncOp>();
for (unsigned i = 0, e = fn.getNumArguments(); i != e; ++i) {
for (auto *user : fn.getArgument(i).getUsers()) {
if (auto loadOp = dyn_cast<AffineReadOpInterface>(user)) {
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/Affine/Transforms/LoopTiling.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -53,11 +53,11 @@ struct LoopTiling : public AffineLoopTilingBase<LoopTiling> {

/// Creates a pass to perform loop tiling on all suitable loop nests of a
/// Function.
std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLoopTilingPass(uint64_t cacheSizeBytes) {
return std::make_unique<LoopTiling>(cacheSizeBytes);
}
std::unique_ptr<OperationPass<FuncOp>> mlir::createLoopTilingPass() {
std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLoopTilingPass() {
return std::make_unique<LoopTiling>();
}

Expand Down
6 changes: 3 additions & 3 deletions mlir/lib/Dialect/Affine/Transforms/LoopUnroll.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ static bool isInnermostAffineForOp(AffineForOp op) {
}

/// Gathers loops that have no affine.for's nested within.
static void gatherInnermostLoops(FuncOp f,
static void gatherInnermostLoops(func::FuncOp f,
SmallVectorImpl<AffineForOp> &loops) {
f.walk([&](AffineForOp forOp) {
if (isInnermostAffineForOp(forOp))
Expand All @@ -80,7 +80,7 @@ static void gatherInnermostLoops(FuncOp f,
}

void LoopUnroll::runOnOperation() {
FuncOp func = getOperation();
func::FuncOp func = getOperation();
if (func.isExternal())
return;

Expand Down Expand Up @@ -132,7 +132,7 @@ LogicalResult LoopUnroll::runOnAffineForOp(AffineForOp forOp) {
return loopUnrollByFactor(forOp, unrollFactor);
}

std::unique_ptr<OperationPass<FuncOp>> mlir::createLoopUnrollPass(
std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLoopUnrollPass(
int unrollFactor, bool unrollUpToFactor, bool unrollFull,
const std::function<unsigned(AffineForOp)> &getUnrollFactor) {
return std::make_unique<LoopUnroll>(
Expand Down
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Affine/Transforms/LoopUnrollAndJam.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ struct LoopUnrollAndJam : public AffineLoopUnrollAndJamBase<LoopUnrollAndJam> {
};
} // namespace

std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLoopUnrollAndJamPass(int unrollJamFactor) {
return std::make_unique<LoopUnrollAndJam>(
unrollJamFactor == -1 ? None : Optional<unsigned>(unrollJamFactor));
Expand Down
3 changes: 2 additions & 1 deletion mlir/lib/Dialect/Affine/Transforms/PipelineDataTransfer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,8 @@ struct PipelineDataTransfer

/// Creates a pass to pipeline explicit movement of data across levels of the
/// memory hierarchy.
std::unique_ptr<OperationPass<FuncOp>> mlir::createPipelineDataTransferPass() {
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createPipelineDataTransferPass() {
return std::make_unique<PipelineDataTransfer>();
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ struct SimplifyAffineStructures

} // namespace

std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createSimplifyAffineStructuresPass() {
return std::make_unique<SimplifyAffineStructures>();
}
Expand Down
10 changes: 5 additions & 5 deletions mlir/lib/Dialect/Affine/Transforms/SuperVectorize.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1700,18 +1700,18 @@ static void vectorizeLoops(Operation *parentOp, DenseSet<Operation *> &loops,
LLVM_DEBUG(dbgs() << "\n");
}

std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
createSuperVectorizePass(ArrayRef<int64_t> virtualVectorSize) {
return std::make_unique<Vectorize>(virtualVectorSize);
}
std::unique_ptr<OperationPass<FuncOp>> createSuperVectorizePass() {
std::unique_ptr<OperationPass<func::FuncOp>> createSuperVectorizePass() {
return std::make_unique<Vectorize>();
}

/// Applies vectorization to the current function by searching over a bunch of
/// predetermined patterns.
void Vectorize::runOnOperation() {
FuncOp f = getOperation();
func::FuncOp f = getOperation();
if (!fastestVaryingPattern.empty() &&
fastestVaryingPattern.size() != vectorSizes.size()) {
f.emitRemark("Fastest varying pattern specified with different size than "
Expand Down Expand Up @@ -1855,11 +1855,11 @@ vectorizeAffineLoopNest(std::vector<SmallVector<AffineForOp, 2>> &loops,
return vectorizeLoopNest(loops, strategy);
}

std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
createSuperVectorizePass(ArrayRef<int64_t> virtualVectorSize) {
return std::make_unique<Vectorize>(virtualVectorSize);
}
std::unique_ptr<OperationPass<FuncOp>> createSuperVectorizePass() {
std::unique_ptr<OperationPass<func::FuncOp>> createSuperVectorizePass() {
return std::make_unique<Vectorize>();
}

Expand Down
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Affine/Utils/LoopFusionUtils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -482,7 +482,7 @@ bool mlir::getLoopNestStats(AffineForOp forOpRoot, LoopNestStats *stats) {
auto walkResult = forOpRoot.walk([&](AffineForOp forOp) {
auto *childForOp = forOp.getOperation();
auto *parentForOp = forOp->getParentOp();
if (!llvm::isa<FuncOp>(parentForOp)) {
if (!llvm::isa<func::FuncOp>(parentForOp)) {
if (!isa<AffineForOp>(parentForOp)) {
LLVM_DEBUG(llvm::dbgs() << "Expected parent AffineForOp\n");
return WalkResult::interrupt();
Expand Down
10 changes: 5 additions & 5 deletions mlir/lib/Dialect/Affine/Utils/LoopUtils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,7 @@ LogicalResult mlir::promoteIfSingleIteration(AffineForOp forOp) {
auto *parentBlock = forOp->getBlock();
if (!iv.use_empty()) {
if (forOp.hasConstantLowerBound()) {
OpBuilder topBuilder(forOp->getParentOfType<FuncOp>().getBody());
OpBuilder topBuilder(forOp->getParentOfType<func::FuncOp>().getBody());
auto constOp = topBuilder.create<arith::ConstantIndexOp>(
forOp.getLoc(), forOp.getConstantLowerBound());
iv.replaceAllUsesWith(constOp);
Expand Down Expand Up @@ -960,7 +960,7 @@ void mlir::getPerfectlyNestedLoops(SmallVectorImpl<AffineForOp> &nestedLoops,
/// Identify valid and profitable bands of loops to tile. This is currently just
/// a temporary placeholder to test the mechanics of tiled code generation.
/// Returns all maximal outermost perfect loop nests to tile.
void mlir::getTileableBands(FuncOp f,
void mlir::getTileableBands(func::FuncOp f,
std::vector<SmallVector<AffineForOp, 6>> *bands) {
// Get maximal perfect nest of 'affine.for' insts starting from root
// (inclusive).
Expand Down Expand Up @@ -2049,7 +2049,7 @@ static LogicalResult generateCopy(
*nBegin = begin;
*nEnd = end;

FuncOp f = begin->getParentOfType<FuncOp>();
func::FuncOp f = begin->getParentOfType<func::FuncOp>();
OpBuilder topBuilder(f.getBody());
Value zeroIndex = topBuilder.create<arith::ConstantIndexOp>(f.getLoc(), 0);

Expand All @@ -2067,7 +2067,7 @@ static LogicalResult generateCopy(
OpBuilder &b = region.isWrite() ? epilogue : prologue;

// Builder to create constants at the top level.
auto func = copyPlacementBlock->getParent()->getParentOfType<FuncOp>();
auto func = copyPlacementBlock->getParent()->getParentOfType<func::FuncOp>();
OpBuilder top(func.getBody());

auto loc = region.loc;
Expand Down Expand Up @@ -2614,7 +2614,7 @@ gatherLoopsInBlock(Block *block, unsigned currLoopDepth,
}

/// Gathers all AffineForOps in 'func.func' grouped by loop depth.
void mlir::gatherLoops(FuncOp func,
void mlir::gatherLoops(func::FuncOp func,
std::vector<SmallVector<AffineForOp, 2>> &depthToLoops) {
for (auto &block : func)
gatherLoopsInBlock(&block, /*currLoopDepth=*/0, depthToLoops);
Expand Down
10 changes: 5 additions & 5 deletions mlir/lib/Dialect/Affine/Utils/Utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -260,7 +260,7 @@ static Operation *getOutermostInvariantForOp(AffineIfOp ifOp) {
// Walk up the parents past all for op that this conditional is invariant on.
auto ifOperands = ifOp.getOperands();
auto *res = ifOp.getOperation();
while (!isa<FuncOp>(res->getParentOp())) {
while (!isa<func::FuncOp>(res->getParentOp())) {
auto *parentOp = res->getParentOp();
if (auto forOp = dyn_cast<AffineForOp>(parentOp)) {
if (llvm::is_contained(ifOperands, forOp.getInductionVar()))
Expand Down Expand Up @@ -1020,7 +1020,7 @@ static void loadCSE(AffineReadOpInterface loadA,
// currently only eliminates the stores only if no other loads/uses (other
// than dealloc) remain.
//
void mlir::affineScalarReplace(FuncOp f, DominanceInfo &domInfo,
void mlir::affineScalarReplace(func::FuncOp f, DominanceInfo &domInfo,
PostDominanceInfo &postDomInfo) {
// Load op's whose results were replaced by those forwarded from stores.
SmallVector<Operation *, 8> opsToErase;
Expand Down Expand Up @@ -1277,12 +1277,12 @@ LogicalResult mlir::replaceAllMemRefUsesWith(
std::unique_ptr<DominanceInfo> domInfo;
std::unique_ptr<PostDominanceInfo> postDomInfo;
if (domOpFilter)
domInfo =
std::make_unique<DominanceInfo>(domOpFilter->getParentOfType<FuncOp>());
domInfo = std::make_unique<DominanceInfo>(
domOpFilter->getParentOfType<func::FuncOp>());

if (postDomOpFilter)
postDomInfo = std::make_unique<PostDominanceInfo>(
postDomOpFilter->getParentOfType<FuncOp>());
postDomOpFilter->getParentOfType<func::FuncOp>());

// Walk all uses of old memref; collect ops to perform replacement. We use a
// DenseSet since an operation could potentially have multiple uses of a
Expand Down
23 changes: 12 additions & 11 deletions mlir/lib/Dialect/Async/Transforms/AsyncParallelFor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,7 @@ struct ParallelComputeFunctionBounds {

struct ParallelComputeFunction {
unsigned numLoops;
FuncOp func;
func::FuncOp func;
llvm::SmallVector<Value> captures;
};

Expand Down Expand Up @@ -258,11 +258,11 @@ static ParallelComputeFunction createParallelComputeFunction(
getParallelComputeFunctionType(op, rewriter);

FunctionType type = computeFuncType.type;
FuncOp func = FuncOp::create(op.getLoc(),
numBlockAlignedInnerLoops > 0
? "parallel_compute_fn_with_aligned_loops"
: "parallel_compute_fn",
type);
func::FuncOp func = func::FuncOp::create(
op.getLoc(),
numBlockAlignedInnerLoops > 0 ? "parallel_compute_fn_with_aligned_loops"
: "parallel_compute_fn",
type);
func.setPrivate();

// Insert function into the module symbol table and assign it unique name.
Expand Down Expand Up @@ -455,8 +455,9 @@ static ParallelComputeFunction createParallelComputeFunction(
// call @parallel_compute_fn(%block_start, %block_size, ...);
// }
//
static FuncOp createAsyncDispatchFunction(ParallelComputeFunction &computeFunc,
PatternRewriter &rewriter) {
static func::FuncOp
createAsyncDispatchFunction(ParallelComputeFunction &computeFunc,
PatternRewriter &rewriter) {
OpBuilder::InsertionGuard guard(rewriter);
Location loc = computeFunc.func.getLoc();
ImplicitLocOpBuilder b(loc, rewriter);
Expand All @@ -476,7 +477,7 @@ static FuncOp createAsyncDispatchFunction(ParallelComputeFunction &computeFunc,
inputTypes.append(computeFuncInputTypes.begin(), computeFuncInputTypes.end());

FunctionType type = rewriter.getFunctionType(inputTypes, TypeRange());
FuncOp func = FuncOp::create(loc, "async_dispatch_fn", type);
func::FuncOp func = func::FuncOp::create(loc, "async_dispatch_fn", type);
func.setPrivate();

// Insert function into the module symbol table and assign it unique name.
Expand Down Expand Up @@ -580,7 +581,7 @@ static void doAsyncDispatch(ImplicitLocOpBuilder &b, PatternRewriter &rewriter,

// Add one more level of indirection to dispatch parallel compute functions
// using async operations and recursive work splitting.
FuncOp asyncDispatchFunction =
func::FuncOp asyncDispatchFunction =
createAsyncDispatchFunction(parallelComputeFunction, rewriter);

Value c0 = b.create<arith::ConstantIndexOp>(0);
Expand Down Expand Up @@ -651,7 +652,7 @@ doSequentialDispatch(ImplicitLocOpBuilder &b, PatternRewriter &rewriter,
const SmallVector<Value> &tripCounts) {
MLIRContext *ctx = op->getContext();

FuncOp compute = parallelComputeFunction.func;
func::FuncOp compute = parallelComputeFunction.func;

Value c0 = b.create<arith::ConstantIndexOp>(0);
Value c1 = b.create<arith::ConstantIndexOp>(1);
Expand Down
62 changes: 33 additions & 29 deletions mlir/lib/Dialect/Async/Transforms/AsyncToAsyncRuntime.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ class AsyncToAsyncRuntimePass
/// operation to enable non-blocking waiting via coroutine suspension.
namespace {
struct CoroMachinery {
FuncOp func;
func::FuncOp func;

// Async execute region returns a completion token, and an async value for
// each yielded value.
Expand Down Expand Up @@ -124,7 +124,7 @@ struct CoroMachinery {
/// return %token, %value : !async.token, !async.value<T>
/// }
///
static CoroMachinery setupCoroMachinery(FuncOp func) {
static CoroMachinery setupCoroMachinery(func::FuncOp func) {
assert(!func.getBlocks().empty() && "Function must have an entry block");

MLIRContext *ctx = func.getContext();
Expand Down Expand Up @@ -237,7 +237,7 @@ static Block *setupSetErrorBlock(CoroMachinery &coro) {
/// function.
///
/// Note that this is not reversible transformation.
static std::pair<FuncOp, CoroMachinery>
static std::pair<func::FuncOp, CoroMachinery>
outlineExecuteOp(SymbolTable &symbolTable, ExecuteOp execute) {
ModuleOp module = execute->getParentOfType<ModuleOp>();

Expand Down Expand Up @@ -265,7 +265,8 @@ outlineExecuteOp(SymbolTable &symbolTable, ExecuteOp execute) {

// TODO: Derive outlined function name from the parent FuncOp (support
// multiple nested async.execute operations).
FuncOp func = FuncOp::create(loc, kAsyncFnPrefix, funcType, funcAttrs);
func::FuncOp func =
func::FuncOp::create(loc, kAsyncFnPrefix, funcType, funcAttrs);
symbolTable.insert(func);

SymbolTable::setSymbolVisibility(func, SymbolTable::Visibility::Private);
Expand Down Expand Up @@ -385,8 +386,9 @@ class AwaitOpLoweringBase : public OpConversionPattern<AwaitType> {
using AwaitAdaptor = typename AwaitType::Adaptor;

public:
AwaitOpLoweringBase(MLIRContext *ctx,
llvm::DenseMap<FuncOp, CoroMachinery> &outlinedFunctions)
AwaitOpLoweringBase(
MLIRContext *ctx,
llvm::DenseMap<func::FuncOp, CoroMachinery> &outlinedFunctions)
: OpConversionPattern<AwaitType>(ctx),
outlinedFunctions(outlinedFunctions) {}

Expand All @@ -399,7 +401,7 @@ class AwaitOpLoweringBase : public OpConversionPattern<AwaitType> {
return rewriter.notifyMatchFailure(op, "unsupported awaitable type");

// Check if await operation is inside the outlined coroutine function.
auto func = op->template getParentOfType<FuncOp>();
auto func = op->template getParentOfType<func::FuncOp>();
auto outlined = outlinedFunctions.find(func);
const bool isInCoroutine = outlined != outlinedFunctions.end();

Expand Down Expand Up @@ -479,7 +481,7 @@ class AwaitOpLoweringBase : public OpConversionPattern<AwaitType> {
}

private:
llvm::DenseMap<FuncOp, CoroMachinery> &outlinedFunctions;
llvm::DenseMap<func::FuncOp, CoroMachinery> &outlinedFunctions;
};

/// Lowering for `async.await` with a token operand.
Expand Down Expand Up @@ -524,15 +526,15 @@ class YieldOpLowering : public OpConversionPattern<async::YieldOp> {
public:
YieldOpLowering(
MLIRContext *ctx,
const llvm::DenseMap<FuncOp, CoroMachinery> &outlinedFunctions)
const llvm::DenseMap<func::FuncOp, CoroMachinery> &outlinedFunctions)
: OpConversionPattern<async::YieldOp>(ctx),
outlinedFunctions(outlinedFunctions) {}

LogicalResult
matchAndRewrite(async::YieldOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
// Check if yield operation is inside the async coroutine function.
auto func = op->template getParentOfType<FuncOp>();
auto func = op->template getParentOfType<func::FuncOp>();
auto outlined = outlinedFunctions.find(func);
if (outlined == outlinedFunctions.end())
return rewriter.notifyMatchFailure(
Expand All @@ -557,7 +559,7 @@ class YieldOpLowering : public OpConversionPattern<async::YieldOp> {
}

private:
const llvm::DenseMap<FuncOp, CoroMachinery> &outlinedFunctions;
const llvm::DenseMap<func::FuncOp, CoroMachinery> &outlinedFunctions;
};

//===----------------------------------------------------------------------===//
Expand All @@ -566,16 +568,17 @@ class YieldOpLowering : public OpConversionPattern<async::YieldOp> {

class AssertOpLowering : public OpConversionPattern<cf::AssertOp> {
public:
AssertOpLowering(MLIRContext *ctx,
llvm::DenseMap<FuncOp, CoroMachinery> &outlinedFunctions)
AssertOpLowering(
MLIRContext *ctx,
llvm::DenseMap<func::FuncOp, CoroMachinery> &outlinedFunctions)
: OpConversionPattern<cf::AssertOp>(ctx),
outlinedFunctions(outlinedFunctions) {}

LogicalResult
matchAndRewrite(cf::AssertOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
// Check if assert operation is inside the async coroutine function.
auto func = op->template getParentOfType<FuncOp>();
auto func = op->template getParentOfType<func::FuncOp>();
auto outlined = outlinedFunctions.find(func);
if (outlined == outlinedFunctions.end())
return rewriter.notifyMatchFailure(
Expand All @@ -597,7 +600,7 @@ class AssertOpLowering : public OpConversionPattern<cf::AssertOp> {
}

private:
llvm::DenseMap<FuncOp, CoroMachinery> &outlinedFunctions;
llvm::DenseMap<func::FuncOp, CoroMachinery> &outlinedFunctions;
};

//===----------------------------------------------------------------------===//
Expand All @@ -607,7 +610,7 @@ class AssertOpLowering : public OpConversionPattern<cf::AssertOp> {
/// 2) Prepending the results with `async.token`.
/// 3) Setting up coroutine blocks.
/// 4) Rewriting return ops as yield op and branch op into the suspend block.
static CoroMachinery rewriteFuncAsCoroutine(FuncOp func) {
static CoroMachinery rewriteFuncAsCoroutine(func::FuncOp func) {
auto *ctx = func->getContext();
auto loc = func.getLoc();
SmallVector<Type> resultTypes;
Expand All @@ -632,7 +635,8 @@ static CoroMachinery rewriteFuncAsCoroutine(FuncOp func) {
///
/// The invocation of this function is safe only when call ops are traversed in
/// reverse order of how they appear in a single block. See `funcsToCoroutines`.
static void rewriteCallsiteForCoroutine(func::CallOp oldCall, FuncOp func) {
static void rewriteCallsiteForCoroutine(func::CallOp oldCall,
func::FuncOp func) {
auto loc = func.getLoc();
ImplicitLocOpBuilder callBuilder(loc, oldCall);
auto newCall = callBuilder.create<func::CallOp>(
Expand All @@ -651,25 +655,25 @@ static void rewriteCallsiteForCoroutine(func::CallOp oldCall, FuncOp func) {
oldCall.erase();
}

static bool isAllowedToBlock(FuncOp func) {
static bool isAllowedToBlock(func::FuncOp func) {
return !!func->getAttrOfType<UnitAttr>(AsyncDialect::kAllowedToBlockAttrName);
}

static LogicalResult
funcsToCoroutines(ModuleOp module,
llvm::DenseMap<FuncOp, CoroMachinery> &outlinedFunctions) {
static LogicalResult funcsToCoroutines(
ModuleOp module,
llvm::DenseMap<func::FuncOp, CoroMachinery> &outlinedFunctions) {
// The following code supports the general case when 2 functions mutually
// recurse into each other. Because of this and that we are relying on
// SymbolUserMap to find pointers to calling FuncOps, we cannot simply erase
// a FuncOp while inserting an equivalent coroutine, because that could lead
// to dangling pointers.

SmallVector<FuncOp> funcWorklist;
SmallVector<func::FuncOp> funcWorklist;

// Careful, it's okay to add a func to the worklist multiple times if and only
// if the loop processing the worklist will skip the functions that have
// already been converted to coroutines.
auto addToWorklist = [&](FuncOp func) {
auto addToWorklist = [&](func::FuncOp func) {
if (isAllowedToBlock(func))
return;
// N.B. To refactor this code into a separate pass the lookup in
Expand All @@ -688,7 +692,7 @@ funcsToCoroutines(ModuleOp module,
};

// Traverse in post-order collecting for each func op the await ops it has.
for (FuncOp func : module.getOps<FuncOp>())
for (func::FuncOp func : module.getOps<func::FuncOp>())
addToWorklist(func);

SymbolTableCollection symbolTable;
Expand Down Expand Up @@ -718,7 +722,7 @@ funcsToCoroutines(ModuleOp module,
// Rewrite the callsites to await on results of the newly created coroutine.
for (Operation *op : users) {
if (func::CallOp call = dyn_cast<func::CallOp>(*op)) {
FuncOp caller = call->getParentOfType<FuncOp>();
func::FuncOp caller = call->getParentOfType<func::FuncOp>();
rewriteCallsiteForCoroutine(call, func); // Careful, erases the call op.
addToWorklist(caller);
} else {
Expand All @@ -736,7 +740,7 @@ void AsyncToAsyncRuntimePass::runOnOperation() {
SymbolTable symbolTable(module);

// Outline all `async.execute` body regions into async functions (coroutines).
llvm::DenseMap<FuncOp, CoroMachinery> outlinedFunctions;
llvm::DenseMap<func::FuncOp, CoroMachinery> outlinedFunctions;

module.walk([&](ExecuteOp execute) {
outlinedFunctions.insert(outlineExecuteOp(symbolTable, execute));
Expand All @@ -749,7 +753,7 @@ void AsyncToAsyncRuntimePass::runOnOperation() {

// Returns true if operation is inside the coroutine.
auto isInCoroutine = [&](Operation *op) -> bool {
auto parentFunc = op->getParentOfType<FuncOp>();
auto parentFunc = op->getParentOfType<func::FuncOp>();
return outlinedFunctions.find(parentFunc) != outlinedFunctions.end();
};

Expand Down Expand Up @@ -800,14 +804,14 @@ void AsyncToAsyncRuntimePass::runOnOperation() {
// Assertions must be converted to runtime errors inside async functions.
runtimeTarget.addDynamicallyLegalOp<cf::AssertOp>(
[&](cf::AssertOp op) -> bool {
auto func = op->getParentOfType<FuncOp>();
auto func = op->getParentOfType<func::FuncOp>();
return outlinedFunctions.find(func) == outlinedFunctions.end();
});

if (eliminateBlockingAwaitOps)
runtimeTarget.addDynamicallyLegalOp<RuntimeAwaitOp>(
[&](RuntimeAwaitOp op) -> bool {
return isAllowedToBlock(op->getParentOfType<FuncOp>());
return isAllowedToBlock(op->getParentOfType<func::FuncOp>());
});

if (failed(applyPartialConversion(module, runtimeTarget,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -626,7 +626,7 @@ bool bufferization::isFunctionArgument(Value value) {
auto bbArg = value.dyn_cast<BlockArgument>();
if (!bbArg)
return false;
return isa<FuncOp>(bbArg.getOwner()->getParentOp());
return isa<func::FuncOp>(bbArg.getOwner()->getParentOp());
}

MemRefType bufferization::getContiguousMemRefType(ShapedType shapedType,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ walkReturnOperations(Region *region,
static bool validateSupportedControlFlow(Operation *op) {
WalkResult result = op->walk([&](Operation *operation) {
// Only check ops that are inside a function.
if (!operation->getParentOfType<FuncOp>())
if (!operation->getParentOfType<func::FuncOp>())
return WalkResult::advance();

auto regions = operation->getRegions();
Expand Down Expand Up @@ -641,7 +641,7 @@ struct BufferDeallocationPass : BufferDeallocationBase<BufferDeallocationPass> {
}

void runOnOperation() override {
FuncOp func = getOperation();
func::FuncOp func = getOperation();
if (func.isExternal())
return;

Expand All @@ -654,7 +654,7 @@ struct BufferDeallocationPass : BufferDeallocationBase<BufferDeallocationPass> {

LogicalResult bufferization::deallocateBuffers(Operation *op) {
if (isa<ModuleOp>(op)) {
WalkResult result = op->walk([&](FuncOp funcOp) {
WalkResult result = op->walk([&](func::FuncOp funcOp) {
if (failed(deallocateBuffers(funcOp)))
return WalkResult::interrupt();
return WalkResult::advance();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ using namespace mlir;
// Updates the func op and entry block.
//
// Any args appended to the entry block are added to `appendedEntryArgs`.
static void updateFuncOp(FuncOp func,
static void updateFuncOp(func::FuncOp func,
SmallVectorImpl<BlockArgument> &appendedEntryArgs) {
auto functionType = func.getFunctionType();

Expand Down Expand Up @@ -57,10 +57,10 @@ static void updateFuncOp(FuncOp func,
appendedEntryArgs.push_back(func.front().addArgument(type, loc));
}

// Updates all ReturnOps in the scope of the given FuncOp by either keeping them
// as return values or copying the associated buffer contents into the given
// out-params.
static void updateReturnOps(FuncOp func,
// Updates all ReturnOps in the scope of the given func::FuncOp by either
// keeping them as return values or copying the associated buffer contents into
// the given out-params.
static void updateReturnOps(func::FuncOp func,
ArrayRef<BlockArgument> appendedEntryArgs) {
func.walk([&](func::ReturnOp op) {
SmallVector<Value, 6> copyIntoOutParams;
Expand Down Expand Up @@ -128,7 +128,7 @@ struct BufferResultsToOutParamsPass
void runOnOperation() override {
ModuleOp module = getOperation();

for (auto func : module.getOps<FuncOp>()) {
for (auto func : module.getOps<func::FuncOp>()) {
SmallVector<BlockArgument, 6> appendedEntryArgs;
updateFuncOp(func, appendedEntryArgs);
if (func.isExternal())
Expand Down
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Bufferization/Transforms/Bufferize.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -224,7 +224,7 @@ std::unique_ptr<Pass> mlir::bufferization::createOneShotBufferizePass(
return std::make_unique<OneShotBufferizePass>(options);
}

std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::bufferization::createFinalizingBufferizePass() {
return std::make_unique<FinalizingBufferizePass>();
}
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/Func/Transforms/DecomposeCallGraphTypes.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -57,12 +57,12 @@ namespace {
/// Expand function arguments according to the provided TypeConverter and
/// ValueDecomposer.
struct DecomposeCallGraphTypesForFuncArgs
: public DecomposeCallGraphTypesOpConversionPattern<FuncOp> {
: public DecomposeCallGraphTypesOpConversionPattern<func::FuncOp> {
using DecomposeCallGraphTypesOpConversionPattern::
DecomposeCallGraphTypesOpConversionPattern;

LogicalResult
matchAndRewrite(FuncOp op, OpAdaptor adaptor,
matchAndRewrite(func::FuncOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const final {
auto functionType = op.getFunctionType();

Expand Down
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/GPU/Transforms/AsyncRegionRewriter.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -338,6 +338,6 @@ void GpuAsyncRegionPass::runOnOperation() {
getOperation().getRegion().walk(SingleTokenUseCallback());
}

std::unique_ptr<OperationPass<FuncOp>> mlir::createGpuAsyncRegionPass() {
std::unique_ptr<OperationPass<func::FuncOp>> mlir::createGpuAsyncRegionPass() {
return std::make_unique<GpuAsyncRegionPass>();
}
5 changes: 3 additions & 2 deletions mlir/lib/Dialect/GPU/Transforms/KernelOutlining.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -293,13 +293,14 @@ class GpuKernelOutliningPass
void runOnOperation() override {
SymbolTable symbolTable(getOperation());
bool modified = false;
for (auto func : getOperation().getOps<FuncOp>()) {
for (auto func : getOperation().getOps<func::FuncOp>()) {
// Insert just after the function.
Block::iterator insertPt(func->getNextNode());
auto funcWalkResult = func.walk([&](gpu::LaunchOp op) {
SetVector<Value> operands;
std::string kernelFnName =
Twine(op->getParentOfType<FuncOp>().getName(), "_kernel").str();
Twine(op->getParentOfType<func::FuncOp>().getName(), "_kernel")
.str();

gpu::GPUFuncOp outlinedFunc =
outlineKernelFuncImpl(op, kernelFnName, operands);
Expand Down
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Linalg/Analysis/DependenceAnalysis.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ StringRef LinalgDependenceGraph::getDependenceTypeStr(DependenceType depType) {
}

LinalgDependenceGraph
LinalgDependenceGraph::buildDependenceGraph(Aliases &aliases, FuncOp f) {
LinalgDependenceGraph::buildDependenceGraph(Aliases &aliases, func::FuncOp f) {
SmallVector<LinalgOp, 8> linalgOps;
f.walk([&](LinalgOp op) { linalgOps.push_back(op); });
return LinalgDependenceGraph(aliases, linalgOps);
Expand Down
137 changes: 69 additions & 68 deletions mlir/lib/Dialect/Linalg/ComprehensiveBufferize/ModuleBufferization.cpp

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Linalg/Transforms/Bufferize.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,6 @@ struct LinalgBufferizePass : public LinalgBufferizeBase<LinalgBufferizePass> {
};
} // namespace

std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgBufferizePass() {
std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLinalgBufferizePass() {
return std::make_unique<LinalgBufferizePass>();
}
5 changes: 3 additions & 2 deletions mlir/lib/Dialect/Linalg/Transforms/Generalization.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ struct LinalgGeneralizationPass
} // namespace

void LinalgGeneralizationPass::runOnOperation() {
FuncOp func = getOperation();
func::FuncOp func = getOperation();
RewritePatternSet patterns(&getContext());
populateLinalgNamedOpsGeneralizationPatterns(patterns);
(void)applyPatternsAndFoldGreedily(func.getBody(), std::move(patterns));
Expand All @@ -85,6 +85,7 @@ void mlir::linalg::populateLinalgNamedOpsGeneralizationPatterns(
patterns.add<LinalgGeneralizationPattern>(patterns.getContext(), marker);
}

std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgGeneralizationPass() {
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgGeneralizationPass() {
return std::make_unique<LinalgGeneralizationPass>();
}
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Linalg/Transforms/HoistPadding.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -128,7 +128,7 @@ static bool isOnlyUsedAsInputOfLinalgOp(tensor::PadOp padOp) {
static void
getAtMostNEnclosingLoops(tensor::PadOp padOp, int nLevels,
SmallVector<scf::ForOp> &reverseEnclosingLoops) {
AsmState state(padOp->getParentOfType<mlir::FuncOp>());
AsmState state(padOp->getParentOfType<func::FuncOp>());
(void)state;
scf::ForOp outermostEnclosingForOp = nullptr;
Operation *nextEnclosingOp = padOp->getParentOp();
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/Linalg/Transforms/Hoisting.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -339,7 +339,7 @@ static void hoistReadWrite(HoistableRead read, HoistableWrite write,
// 4. Hoist the tensor_read/tensor_write and update the tensor SSA links.
// After this transformation the scf.forOp may have unused arguments that can be
// remove by the canonicalization pass.
void mlir::linalg::hoistRedundantVectorTransfersOnTensor(FuncOp func) {
void mlir::linalg::hoistRedundantVectorTransfersOnTensor(func::FuncOp func) {
bool changed = true;
while (changed) {
changed = false;
Expand Down Expand Up @@ -391,7 +391,7 @@ void mlir::linalg::hoistRedundantVectorTransfersOnTensor(FuncOp func) {
}
}

void mlir::linalg::hoistRedundantVectorTransfers(FuncOp func) {
void mlir::linalg::hoistRedundantVectorTransfers(func::FuncOp func) {
bool changed = true;
while (changed) {
changed = false;
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/Linalg/Transforms/InlineScalarOperands.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,7 @@ namespace {
struct LinalgInlineScalarOperandsPass
: public LinalgInlineScalarOperandsBase<LinalgInlineScalarOperandsPass> {
void runOnOperation() override {
FuncOp funcOp = getOperation();
func::FuncOp funcOp = getOperation();
MLIRContext *context = funcOp.getContext();
RewritePatternSet patterns(context);

Expand All @@ -108,7 +108,7 @@ struct LinalgInlineScalarOperandsPass
};
} // namespace

std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgInlineScalarOperandsPass() {
return std::make_unique<LinalgInlineScalarOperandsPass>();
}
26 changes: 15 additions & 11 deletions mlir/lib/Dialect/Linalg/Transforms/LinalgStrategyPasses.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -450,7 +450,7 @@ struct LinalgStrategyRemoveMarkersPass
} // namespace

/// Create a LinalgStrategyTileAndFusePass.
std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgStrategyTileAndFusePass(
StringRef opName, const LinalgTilingAndFusionOptions &options,
const LinalgTransformationFilter &filter) {
Expand All @@ -459,43 +459,46 @@ mlir::createLinalgStrategyTileAndFusePass(
}

/// Create a LinalgStrategyTilePass.
std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgStrategyTilePass(StringRef opName,
const LinalgTilingOptions &opt,
const LinalgTransformationFilter &filter) {
return std::make_unique<LinalgStrategyTilePass>(opName, opt, filter);
}

/// Create a LinalgStrategyPadPass.
std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgStrategyPadPass(StringRef opName,
const LinalgPaddingOptions &opt,
const LinalgTransformationFilter &filter) {
return std::make_unique<LinalgStrategyPadPass>(opName, opt, filter);
}

/// Create a LinalgStrategyPromotePass.
std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgStrategyPromotePass(
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgStrategyPromotePass(
StringRef opName, const LinalgPromotionOptions &opt,
const LinalgTransformationFilter &filter) {
return std::make_unique<LinalgStrategyPromotePass>(opName, opt, filter);
}

/// Create a LinalgStrategyGeneralizePass.
std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgStrategyGeneralizePass(
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgStrategyGeneralizePass(
StringRef opName, const LinalgTransformationFilter &filter) {
return std::make_unique<LinalgStrategyGeneralizePass>(opName, filter);
}

/// Create a LinalgStrategyDecomposePass.
// TODO: if/when we need finer control add an `opName` parameter.
std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgStrategyDecomposePass(
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgStrategyDecomposePass(
const LinalgTransformationFilter &filter) {
return std::make_unique<LinalgStrategyDecomposePass>(filter);
}

/// Create a LinalgStrategyInterchangePass.
std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgStrategyInterchangePass(
ArrayRef<int64_t> iteratorInterchange,
const LinalgTransformationFilter &filter) {
Expand All @@ -504,29 +507,30 @@ mlir::createLinalgStrategyInterchangePass(
}

/// Create a LinalgStrategyVectorizePass.
std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgStrategyVectorizePass(
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgStrategyVectorizePass(
StringRef opName, LinalgVectorizationOptions opt,
const LinalgTransformationFilter &filter, bool padVectorize) {
return std::make_unique<LinalgStrategyVectorizePass>(opName, opt, filter,
padVectorize);
}

/// Create a LinalgStrategyEnablePass.
std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgStrategyEnablePass(LinalgEnablingOptions opt,
const LinalgTransformationFilter &filter) {
return std::make_unique<LinalgStrategyEnablePass>(opt, filter);
}

/// Create a LinalgStrategyLowerVectorsPass.
std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgStrategyLowerVectorsPass(
LinalgVectorLoweringOptions opt, const LinalgTransformationFilter &filter) {
return std::make_unique<LinalgStrategyLowerVectorsPass>(opt, filter);
}

/// Create a LinalgStrategyRemoveMarkersPass.
std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgStrategyRemoveMarkersPass() {
return std::make_unique<LinalgStrategyRemoveMarkersPass>();
}
9 changes: 5 additions & 4 deletions mlir/lib/Dialect/Linalg/Transforms/Loops.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -298,7 +298,7 @@ struct FoldAffineOp : public RewritePattern {
};

template <typename LoopType>
static void lowerLinalgToLoopsImpl(FuncOp funcOp) {
static void lowerLinalgToLoopsImpl(func::FuncOp funcOp) {
MLIRContext *context = funcOp.getContext();
RewritePatternSet patterns(context);
patterns.add<LinalgRewritePattern<LoopType>>(context);
Expand Down Expand Up @@ -338,16 +338,17 @@ struct LowerToParallelLoops

} // namespace

std::unique_ptr<OperationPass<FuncOp>> mlir::createConvertLinalgToLoopsPass() {
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createConvertLinalgToLoopsPass() {
return std::make_unique<LowerToLoops>();
}

std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createConvertLinalgToParallelLoopsPass() {
return std::make_unique<LowerToParallelLoops>();
}

std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createConvertLinalgToAffineLoopsPass() {
return std::make_unique<LowerToAffineLoops>();
}
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/Linalg/Transforms/Promotion.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -414,10 +414,10 @@ struct LinalgPromotionPass : public LinalgPromotionBase<LinalgPromotionPass> {
} // namespace

// TODO: support more transformation options in the pass.
std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgPromotionPass(bool dynamicBuffers, bool useAlloca) {
return std::make_unique<LinalgPromotionPass>(dynamicBuffers, useAlloca);
}
std::unique_ptr<OperationPass<FuncOp>> mlir::createLinalgPromotionPass() {
std::unique_ptr<OperationPass<func::FuncOp>> mlir::createLinalgPromotionPass() {
return std::make_unique<LinalgPromotionPass>();
}
6 changes: 3 additions & 3 deletions mlir/lib/Dialect/Linalg/Transforms/Tiling.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -441,7 +441,7 @@ void mlir::linalg::populatePadTensorTilingPatterns(
patterns.add<PadOpTilingPattern>(ctx, options);
}

static void applyExtractSliceOfPadTensorSwapPattern(FuncOp funcOp) {
static void applyExtractSliceOfPadTensorSwapPattern(func::FuncOp funcOp) {
MLIRContext *ctx = funcOp.getContext();
RewritePatternSet patterns(ctx);
patterns.add<ExtractSliceOfPadTensorSwapPattern>(patterns.getContext());
Expand All @@ -460,7 +460,7 @@ struct LinalgTilingPass : public LinalgTilingBase<LinalgTilingPass> {
}

void runOnOperation() override {
FuncOp funcOp = getOperation();
func::FuncOp funcOp = getOperation();
LinalgTilingLoopType type =
llvm::StringSwitch<LinalgTilingLoopType>(loopType)
.Case("for", LinalgTilingLoopType::Loops)
Expand Down Expand Up @@ -491,7 +491,7 @@ struct LinalgTilingPass : public LinalgTilingBase<LinalgTilingPass> {

} // namespace

std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createLinalgTilingPass(ArrayRef<int64_t> tileSizes,
linalg::LinalgTilingLoopType loopType) {
return std::make_unique<LinalgTilingPass>(tileSizes, loopType);
Expand Down
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Linalg/Transforms/Transforms.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,7 @@ mlir::linalg::LinalgTilingOptions::setTileSizes(ArrayRef<int64_t> ts) {
tileSizeComputationFunction = [tileSizes](OpBuilder &b, Operation *op) {
OpBuilder::InsertionGuard guard(b);
b.setInsertionPointToStart(
&op->getParentOfType<FuncOp>().getBody().front());
&op->getParentOfType<func::FuncOp>().getBody().front());
return llvm::to_vector<4>(map_range(tileSizes, [&](int64_t s) {
Value v = b.create<arith::ConstantIndexOp>(op->getLoc(), s);
return v;
Expand Down
1 change: 0 additions & 1 deletion mlir/lib/Dialect/Linalg/Transforms/Vectorization.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -676,7 +676,6 @@ LogicalResult mlir::linalg::vectorizeCopy(RewriterBase &rewriter,
Operation *writeValue = rewriter.create<vector::TransferWriteOp>(
loc, readValue, copyOp.target(), indices,
rewriter.getMultiDimIdentityMap(srcType.getRank()));
copyOp->getParentOfType<FuncOp>().dump();
rewriter.replaceOp(copyOp, writeValue->getResults());
return success();
}
Expand Down
47 changes: 25 additions & 22 deletions mlir/lib/Dialect/MemRef/Transforms/NormalizeMemRefs.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -34,12 +34,13 @@ namespace {
/// non-normalizable as well. We assume external functions to be normalizable.
struct NormalizeMemRefs : public NormalizeMemRefsBase<NormalizeMemRefs> {
void runOnOperation() override;
void normalizeFuncOpMemRefs(FuncOp funcOp, ModuleOp moduleOp);
bool areMemRefsNormalizable(FuncOp funcOp);
void updateFunctionSignature(FuncOp funcOp, ModuleOp moduleOp);
void setCalleesAndCallersNonNormalizable(FuncOp funcOp, ModuleOp moduleOp,
DenseSet<FuncOp> &normalizableFuncs);
Operation *createOpResultsNormalized(FuncOp funcOp, Operation *oldOp);
void normalizeFuncOpMemRefs(func::FuncOp funcOp, ModuleOp moduleOp);
bool areMemRefsNormalizable(func::FuncOp funcOp);
void updateFunctionSignature(func::FuncOp funcOp, ModuleOp moduleOp);
void setCalleesAndCallersNonNormalizable(
func::FuncOp funcOp, ModuleOp moduleOp,
DenseSet<func::FuncOp> &normalizableFuncs);
Operation *createOpResultsNormalized(func::FuncOp funcOp, Operation *oldOp);
};

} // namespace
Expand All @@ -57,17 +58,17 @@ void NormalizeMemRefs::runOnOperation() {
// normalizable are removed from this set.
// TODO: Change this to work on FuncLikeOp once there is an operation
// interface for it.
DenseSet<FuncOp> normalizableFuncs;
DenseSet<func::FuncOp> normalizableFuncs;
// Initialize `normalizableFuncs` with all the functions within a module.
moduleOp.walk([&](FuncOp funcOp) { normalizableFuncs.insert(funcOp); });
moduleOp.walk([&](func::FuncOp funcOp) { normalizableFuncs.insert(funcOp); });

// Traverse through all the functions applying a filter which determines
// whether that function is normalizable or not. All callers/callees of
// a non-normalizable function will also become non-normalizable even if
// they aren't passing any or specific non-normalizable memrefs. So,
// functions which calls or get called by a non-normalizable becomes non-
// normalizable functions themselves.
moduleOp.walk([&](FuncOp funcOp) {
moduleOp.walk([&](func::FuncOp funcOp) {
if (normalizableFuncs.contains(funcOp)) {
if (!areMemRefsNormalizable(funcOp)) {
LLVM_DEBUG(llvm::dbgs()
Expand All @@ -85,7 +86,7 @@ void NormalizeMemRefs::runOnOperation() {
LLVM_DEBUG(llvm::dbgs() << "Normalizing " << normalizableFuncs.size()
<< " functions\n");
// Those functions which can be normalized are subjected to normalization.
for (FuncOp &funcOp : normalizableFuncs)
for (func::FuncOp &funcOp : normalizableFuncs)
normalizeFuncOpMemRefs(funcOp, moduleOp);
}

Expand All @@ -102,7 +103,8 @@ static bool isMemRefNormalizable(Value::user_range opUsers) {
/// Set all the calling functions and the callees of the function as not
/// normalizable.
void NormalizeMemRefs::setCalleesAndCallersNonNormalizable(
FuncOp funcOp, ModuleOp moduleOp, DenseSet<FuncOp> &normalizableFuncs) {
func::FuncOp funcOp, ModuleOp moduleOp,
DenseSet<func::FuncOp> &normalizableFuncs) {
if (!normalizableFuncs.contains(funcOp))
return;

Expand All @@ -115,8 +117,9 @@ void NormalizeMemRefs::setCalleesAndCallersNonNormalizable(
for (SymbolTable::SymbolUse symbolUse : *symbolUses) {
// TODO: Extend this for ops that are FunctionOpInterface. This would
// require creating an OpInterface for FunctionOpInterface ops.
FuncOp parentFuncOp = symbolUse.getUser()->getParentOfType<FuncOp>();
for (FuncOp &funcOp : normalizableFuncs) {
func::FuncOp parentFuncOp =
symbolUse.getUser()->getParentOfType<func::FuncOp>();
for (func::FuncOp &funcOp : normalizableFuncs) {
if (parentFuncOp == funcOp) {
setCalleesAndCallersNonNormalizable(funcOp, moduleOp,
normalizableFuncs);
Expand All @@ -128,8 +131,8 @@ void NormalizeMemRefs::setCalleesAndCallersNonNormalizable(
// Functions called by this function.
funcOp.walk([&](func::CallOp callOp) {
StringAttr callee = callOp.getCalleeAttr().getAttr();
for (FuncOp &funcOp : normalizableFuncs) {
// We compare FuncOp and callee's name.
for (func::FuncOp &funcOp : normalizableFuncs) {
// We compare func::FuncOp and callee's name.
if (callee == funcOp.getNameAttr()) {
setCalleesAndCallersNonNormalizable(funcOp, moduleOp,
normalizableFuncs);
Expand All @@ -146,7 +149,7 @@ void NormalizeMemRefs::setCalleesAndCallersNonNormalizable(
/// wherein even if the non-normalizable memref is not a part of the function's
/// argument or return type, we still label the entire function as
/// non-normalizable. We assume external functions to be normalizable.
bool NormalizeMemRefs::areMemRefsNormalizable(FuncOp funcOp) {
bool NormalizeMemRefs::areMemRefsNormalizable(func::FuncOp funcOp) {
// We assume external functions to be normalizable.
if (funcOp.isExternal())
return true;
Expand Down Expand Up @@ -191,7 +194,7 @@ bool NormalizeMemRefs::areMemRefsNormalizable(FuncOp funcOp) {
/// the calling function's signature.
/// TODO: An update to the calling function signature is required only if the
/// returned value is in turn used in ReturnOp of the calling function.
void NormalizeMemRefs::updateFunctionSignature(FuncOp funcOp,
void NormalizeMemRefs::updateFunctionSignature(func::FuncOp funcOp,
ModuleOp moduleOp) {
FunctionType functionType = funcOp.getFunctionType();
SmallVector<Type, 4> resultTypes;
Expand Down Expand Up @@ -239,7 +242,7 @@ void NormalizeMemRefs::updateFunctionSignature(FuncOp funcOp,
// function in ReturnOps, the caller function's signature will also change.
// Hence we record the caller function in 'funcOpsToUpdate' to update their
// signature as well.
llvm::SmallDenseSet<FuncOp, 8> funcOpsToUpdate;
llvm::SmallDenseSet<func::FuncOp, 8> funcOpsToUpdate;
// We iterate over all symbolic uses of the function and update the return
// type at the caller site.
Optional<SymbolTable::UseRange> symbolUses = funcOp.getSymbolUses(moduleOp);
Expand Down Expand Up @@ -301,7 +304,7 @@ void NormalizeMemRefs::updateFunctionSignature(FuncOp funcOp,
// required.
// TODO: Extend this for ops that are FunctionOpInterface. This would
// require creating an OpInterface for FunctionOpInterface ops.
FuncOp parentFuncOp = newCallOp->getParentOfType<FuncOp>();
func::FuncOp parentFuncOp = newCallOp->getParentOfType<func::FuncOp>();
funcOpsToUpdate.insert(parentFuncOp);
}
}
Expand All @@ -313,14 +316,14 @@ void NormalizeMemRefs::updateFunctionSignature(FuncOp funcOp,
// Updating the signature type of those functions which call the current
// function. Only if the return type of the current function has a normalized
// memref will the caller function become a candidate for signature update.
for (FuncOp parentFuncOp : funcOpsToUpdate)
for (func::FuncOp parentFuncOp : funcOpsToUpdate)
updateFunctionSignature(parentFuncOp, moduleOp);
}

/// Normalizes the memrefs within a function which includes those arising as a
/// result of AllocOps, CallOps and function's argument. The ModuleOp argument
/// is used to help update function's signature after normalization.
void NormalizeMemRefs::normalizeFuncOpMemRefs(FuncOp funcOp,
void NormalizeMemRefs::normalizeFuncOpMemRefs(func::FuncOp funcOp,
ModuleOp moduleOp) {
// Turn memrefs' non-identity layouts maps into ones with identity. Collect
// alloc ops first and then process since normalizeMemRef replaces/erases ops
Expand Down Expand Up @@ -477,7 +480,7 @@ void NormalizeMemRefs::normalizeFuncOpMemRefs(FuncOp funcOp,
/// normalized, and new operation containing them in the operation results is
/// returned. If all of the results of `oldOp` have no memrefs or memrefs
/// without affine map, `oldOp` is returned without modification.
Operation *NormalizeMemRefs::createOpResultsNormalized(FuncOp funcOp,
Operation *NormalizeMemRefs::createOpResultsNormalized(func::FuncOp funcOp,
Operation *oldOp) {
// Prepare OperationState to create newOp containing normalized memref in
// the operation results.
Expand Down
3 changes: 2 additions & 1 deletion mlir/lib/Dialect/Quant/Transforms/ConvertConst.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -98,6 +98,7 @@ void ConvertConstPass::runOnOperation() {
(void)applyPatternsAndFoldGreedily(func, std::move(patterns));
}

std::unique_ptr<OperationPass<FuncOp>> mlir::quant::createConvertConstPass() {
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::quant::createConvertConstPass() {
return std::make_unique<ConvertConstPass>();
}
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Quant/Transforms/ConvertSimQuant.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -134,7 +134,7 @@ void ConvertSimulatedQuantPass::runOnOperation() {
signalPassFailure();
}

std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::quant::createConvertSimulatedQuantPass() {
return std::make_unique<ConvertSimulatedQuantPass>();
}
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/SCF/Transforms/ForToWhile.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,7 @@ struct ForLoopLoweringPattern : public OpRewritePattern<ForOp> {

struct ForToWhileLoop : public SCFForToWhileLoopBase<ForToWhileLoop> {
void runOnOperation() override {
FuncOp funcOp = getOperation();
func::FuncOp funcOp = getOperation();
MLIRContext *ctx = funcOp.getContext();
RewritePatternSet patterns(ctx);
patterns.add<ForLoopLoweringPattern>(ctx);
Expand Down
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/SCF/Transforms/LoopCanonicalization.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -182,7 +182,7 @@ struct AffineOpSCFCanonicalizationPattern : public OpRewritePattern<OpTy> {
struct SCFForLoopCanonicalization
: public SCFForLoopCanonicalizationBase<SCFForLoopCanonicalization> {
void runOnOperation() override {
FuncOp funcOp = getOperation();
func::FuncOp funcOp = getOperation();
MLIRContext *ctx = funcOp.getContext();
RewritePatternSet patterns(ctx);
scf::populateSCFForLoopCanonicalizationPatterns(patterns);
Expand Down
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/SCF/Transforms/LoopSpecialization.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -251,7 +251,7 @@ struct ForLoopSpecialization

struct ForLoopPeeling : public SCFForLoopPeelingBase<ForLoopPeeling> {
void runOnOperation() override {
FuncOp funcOp = getOperation();
func::FuncOp funcOp = getOperation();
MLIRContext *ctx = funcOp.getContext();
RewritePatternSet patterns(ctx);
patterns.add<ForLoopPeelingPattern>(ctx, skipPartial);
Expand Down
20 changes: 11 additions & 9 deletions mlir/lib/Dialect/SCF/Utils/Utils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -98,9 +98,10 @@ scf::ForOp mlir::cloneWithNewYields(OpBuilder &b, scf::ForOp loop,
/// `outlinedFuncBody` to alloc simple canonicalizations.
// TODO: support more than single-block regions.
// TODO: more flexible constant handling.
FailureOr<FuncOp> mlir::outlineSingleBlockRegion(RewriterBase &rewriter,
Location loc, Region &region,
StringRef funcName) {
FailureOr<func::FuncOp> mlir::outlineSingleBlockRegion(RewriterBase &rewriter,
Location loc,
Region &region,
StringRef funcName) {
assert(!funcName.empty() && "funcName cannot be empty");
if (!region.hasOneBlock())
return failure();
Expand All @@ -110,7 +111,7 @@ FailureOr<FuncOp> mlir::outlineSingleBlockRegion(RewriterBase &rewriter,

// Outline before current function.
OpBuilder::InsertionGuard g(rewriter);
rewriter.setInsertionPoint(region.getParentOfType<FuncOp>());
rewriter.setInsertionPoint(region.getParentOfType<func::FuncOp>());

SetVector<Value> captures;
getUsedValuesDefinedAbove(region, captures);
Expand All @@ -132,7 +133,8 @@ FailureOr<FuncOp> mlir::outlineSingleBlockRegion(RewriterBase &rewriter,
FunctionType outlinedFuncType =
FunctionType::get(rewriter.getContext(), outlinedFuncArgTypes,
originalTerminator->getOperandTypes());
auto outlinedFunc = rewriter.create<FuncOp>(loc, funcName, outlinedFuncType);
auto outlinedFunc =
rewriter.create<func::FuncOp>(loc, funcName, outlinedFuncType);
Block *outlinedFuncBody = outlinedFunc.addEntryBlock();

// Merge blocks while replacing the original block operands.
Expand Down Expand Up @@ -198,12 +200,12 @@ FailureOr<FuncOp> mlir::outlineSingleBlockRegion(RewriterBase &rewriter,
return outlinedFunc;
}

LogicalResult mlir::outlineIfOp(RewriterBase &b, scf::IfOp ifOp, FuncOp *thenFn,
StringRef thenFnName, FuncOp *elseFn,
StringRef elseFnName) {
LogicalResult mlir::outlineIfOp(RewriterBase &b, scf::IfOp ifOp,
func::FuncOp *thenFn, StringRef thenFnName,
func::FuncOp *elseFn, StringRef elseFnName) {
IRRewriter rewriter(b);
Location loc = ifOp.getLoc();
FailureOr<FuncOp> outlinedFuncOpOrFailure;
FailureOr<func::FuncOp> outlinedFuncOpOrFailure;
if (thenFn && !ifOp.getThenRegion().empty()) {
outlinedFuncOpOrFailure = outlineSingleBlockRegion(
rewriter, loc, ifOp.getThenRegion(), thenFnName);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@ void DecorateSPIRVCompositeTypeLayoutPass::runOnOperation() {
populateSPIRVLayoutInfoPatterns(patterns);
ConversionTarget target(*(module.getContext()));
target.addLegalDialect<spirv::SPIRVDialect>();
target.addLegalOp<FuncOp>();
target.addLegalOp<func::FuncOp>();
target.addDynamicallyLegalOp<spirv::GlobalVariableOp>(
[](spirv::GlobalVariableOp op) {
return VulkanLayoutUtils::isLegalType(op.type());
Expand Down
10 changes: 5 additions & 5 deletions mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -534,24 +534,24 @@ SPIRVTypeConverter::SPIRVTypeConverter(spirv::TargetEnvAttr targetAttr,
}

//===----------------------------------------------------------------------===//
// FuncOp Conversion Patterns
// func::FuncOp Conversion Patterns
//===----------------------------------------------------------------------===//

namespace {
/// A pattern for rewriting function signature to convert arguments of functions
/// to be of valid SPIR-V types.
class FuncOpConversion final : public OpConversionPattern<FuncOp> {
class FuncOpConversion final : public OpConversionPattern<func::FuncOp> {
public:
using OpConversionPattern<FuncOp>::OpConversionPattern;
using OpConversionPattern<func::FuncOp>::OpConversionPattern;

LogicalResult
matchAndRewrite(FuncOp funcOp, OpAdaptor adaptor,
matchAndRewrite(func::FuncOp funcOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override;
};
} // namespace

LogicalResult
FuncOpConversion::matchAndRewrite(FuncOp funcOp, OpAdaptor adaptor,
FuncOpConversion::matchAndRewrite(func::FuncOp funcOp, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const {
auto fnType = funcOp.getFunctionType();
if (fnType.getNumResults() > 1)
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/Shape/IR/Shape.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1190,13 +1190,13 @@ void FunctionLibraryOp::build(OpBuilder &builder, OperationState &result,
::mlir::SymbolTable::getSymbolAttrName(), builder.getStringAttr(name)));
}

FuncOp FunctionLibraryOp::getShapeFunction(Operation *op) {
func::FuncOp FunctionLibraryOp::getShapeFunction(Operation *op) {
auto attr = getMapping()
.get(op->getName().getIdentifier())
.dyn_cast_or_null<FlatSymbolRefAttr>();
if (!attr)
return nullptr;
return lookupSymbol<FuncOp>(attr);
return lookupSymbol<func::FuncOp>(attr);
}

ParseResult FunctionLibraryOp::parse(OpAsmParser &parser,
Expand Down
2 changes: 1 addition & 1 deletion mlir/lib/Dialect/Shape/Transforms/Bufferize.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,6 @@ struct ShapeBufferizePass : public ShapeBufferizeBase<ShapeBufferizePass> {
};
} // namespace

std::unique_ptr<OperationPass<FuncOp>> mlir::createShapeBufferizePass() {
std::unique_ptr<OperationPass<func::FuncOp>> mlir::createShapeBufferizePass() {
return std::make_unique<ShapeBufferizePass>();
}
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ void mlir::populateRemoveShapeConstraintsPatterns(RewritePatternSet &patterns) {
patterns.getContext());
}

std::unique_ptr<OperationPass<FuncOp>>
std::unique_ptr<OperationPass<func::FuncOp>>
mlir::createRemoveShapeConstraintsPass() {
return std::make_unique<RemoveShapeConstraintsPass>();
}
Original file line number Diff line number Diff line change
Expand Up @@ -30,25 +30,25 @@ using namespace mlir::sparse_tensor;
void mlir::sparse_tensor::buildSparseCompiler(
OpPassManager &pm, const SparseCompilerOptions &options) {
// TODO(wrengr): ensure the original `pm` is for ModuleOp
pm.addNestedPass<FuncOp>(createLinalgGeneralizationPass());
pm.addNestedPass<func::FuncOp>(createLinalgGeneralizationPass());
pm.addPass(createLinalgElementwiseOpFusionPass());
pm.addPass(createSparsificationPass(options.sparsificationOptions()));
pm.addPass(createSparseTensorConversionPass(
options.sparseTensorConversionOptions()));
pm.addNestedPass<FuncOp>(createLinalgBufferizePass());
pm.addNestedPass<FuncOp>(vector::createVectorBufferizePass());
pm.addNestedPass<FuncOp>(createConvertLinalgToLoopsPass());
pm.addNestedPass<FuncOp>(createConvertVectorToSCFPass());
pm.addNestedPass<FuncOp>(createConvertSCFToCFPass());
pm.addNestedPass<func::FuncOp>(createLinalgBufferizePass());
pm.addNestedPass<func::FuncOp>(vector::createVectorBufferizePass());
pm.addNestedPass<func::FuncOp>(createConvertLinalgToLoopsPass());
pm.addNestedPass<func::FuncOp>(createConvertVectorToSCFPass());
pm.addNestedPass<func::FuncOp>(createConvertSCFToCFPass());
pm.addPass(func::createFuncBufferizePass());
pm.addPass(arith::createConstantBufferizePass());
pm.addNestedPass<FuncOp>(createTensorBufferizePass());
pm.addNestedPass<FuncOp>(
pm.addNestedPass<func::FuncOp>(createTensorBufferizePass());
pm.addNestedPass<func::FuncOp>(
mlir::bufferization::createFinalizingBufferizePass());
pm.addPass(createLowerAffinePass());
pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
pm.addPass(createMemRefToLLVMPass());
pm.addNestedPass<FuncOp>(createConvertMathToLLVMPass());
pm.addNestedPass<func::FuncOp>(createConvertMathToLLVMPass());
pm.addPass(createConvertFuncToLLVMPass());
pm.addPass(createReconcileUnrealizedCastsPass());
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -57,10 +57,10 @@ static FlatSymbolRefAttr getFunc(Operation *op, StringRef name,
MLIRContext *context = op->getContext();
auto module = op->getParentOfType<ModuleOp>();
auto result = SymbolRefAttr::get(context, name);
auto func = module.lookupSymbol<FuncOp>(result.getAttr());
auto func = module.lookupSymbol<func::FuncOp>(result.getAttr());
if (!func) {
OpBuilder moduleBuilder(module.getBodyRegion());
func = moduleBuilder.create<FuncOp>(
func = moduleBuilder.create<func::FuncOp>(
op->getLoc(), name,
FunctionType::get(context, operands.getTypes(), resultType));
func.setPrivate();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -91,7 +91,7 @@ struct SparseTensorConversionPass
// All dynamic rules below accept new function, call, return, and tensor
// dim and cast operations as legal output of the rewriting provided that
// all sparse tensor types have been fully rewritten.
target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
return converter.isSignatureLegal(op.getFunctionType());
});
target.addDynamicallyLegalOp<func::CallOp>([&](func::CallOp op) {
Expand All @@ -118,8 +118,8 @@ struct SparseTensorConversionPass
SparseTensorConversionOptions options(
sparseToSparseConversionStrategy(sparseToSparse));
// Populate with rules and apply rewriting rules.
populateFunctionOpInterfaceTypeConversionPattern<FuncOp>(patterns,
converter);
populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(patterns,
converter);
populateCallOpTypeConversionPattern(patterns, converter);
populateSparseTensorConversionPatterns(converter, patterns, options);
if (failed(applyPartialConversion(getOperation(), target,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -281,7 +281,7 @@ static bool computeIterationGraph(Merger &merger, linalg::GenericOp op,
/// Returns true if tensor has an in-place annotation.
static bool isInPlace(Value val) {
if (auto arg = val.dyn_cast<BlockArgument>())
if (auto funcOp = dyn_cast<FuncOp>(arg.getOwner()->getParentOp()))
if (auto funcOp = dyn_cast<func::FuncOp>(arg.getOwner()->getParentOp()))
if (auto attr = funcOp.getArgAttrOfType<BoolAttr>(
arg.getArgNumber(),
bufferization::BufferizableOpInterface::kInplaceableAttrName))
Expand Down
4 changes: 2 additions & 2 deletions mlir/lib/Dialect/Tosa/Transforms/TosaInferShapes.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -279,7 +279,7 @@ void propagateShapesInRegion(Region &region) {
struct TosaInferShapes : public TosaInferShapesBase<TosaInferShapes> {
public:
void runOnOperation() override {
FuncOp func = getOperation();
func::FuncOp func = getOperation();

IRRewriter rewriter(func.getContext());

Expand All @@ -288,7 +288,7 @@ struct TosaInferShapes : public TosaInferShapesBase<TosaInferShapes> {
    // Insert UnrealizedConversionCasts to guarantee ReturnOp agrees with
// the FuncOp type.
func.walk([&](func::ReturnOp op) {
FuncOp parent = dyn_cast<FuncOp>(op->getParentOp());
func::FuncOp parent = dyn_cast<func::FuncOp>(op->getParentOp());
if (!parent)
return;

Expand Down
2 changes: 1 addition & 1 deletion mlir/test/IR/attribute.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -490,7 +490,7 @@ func @fn() { return }

// -----

// expected-error @+1 {{referencing to a 'FuncOp' symbol}}
// expected-error @+1 {{referencing to a 'func::FuncOp' symbol}}
"test.symbol_ref_attr"() {symbol = @foo} : () -> ()

// -----
Expand Down
2 changes: 1 addition & 1 deletion mlir/test/lib/Dialect/Affine/TestAffineDataCopy.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ using namespace mlir;
namespace {

struct TestAffineDataCopy
: public PassWrapper<TestAffineDataCopy, OperationPass<FuncOp>> {
: public PassWrapper<TestAffineDataCopy, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestAffineDataCopy)

StringRef getArgument() const final { return PASS_NAME; }
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ using namespace mlir;
namespace {
struct TestAffineLoopParametricTiling
: public PassWrapper<TestAffineLoopParametricTiling,
OperationPass<FuncOp>> {
OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestAffineLoopParametricTiling)

StringRef getArgument() const final { return "test-affine-parametric-tile"; }
Expand All @@ -41,7 +41,7 @@ static void checkIfTilingParametersExist(ArrayRef<AffineForOp> band) {
assert(!band.empty() && "no loops in input band");
AffineForOp topLoop = band[0];

if (FuncOp funcOp = dyn_cast<FuncOp>(topLoop->getParentOp()))
if (func::FuncOp funcOp = dyn_cast<func::FuncOp>(topLoop->getParentOp()))
assert(funcOp.getNumArguments() >= band.size() && "Too few tile sizes");
}

Expand Down
2 changes: 1 addition & 1 deletion mlir/test/lib/Dialect/Affine/TestLoopFusion.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ static llvm::cl::opt<bool> clTestLoopFusionTransformation(
namespace {

struct TestLoopFusion
: public PassWrapper<TestLoopFusion, OperationPass<FuncOp>> {
: public PassWrapper<TestLoopFusion, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLoopFusion)

StringRef getArgument() const final { return "test-loop-fusion"; }
Expand Down
4 changes: 2 additions & 2 deletions mlir/test/lib/Dialect/Affine/TestVectorizationUtils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ static llvm::cl::opt<bool> clTestVecAffineLoopNest(

namespace {
struct VectorizerTestPass
: public PassWrapper<VectorizerTestPass, OperationPass<FuncOp>> {
: public PassWrapper<VectorizerTestPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(VectorizerTestPass)

static constexpr auto kTestAffineMapOpName = "test_affine_map";
Expand Down Expand Up @@ -241,7 +241,7 @@ void VectorizerTestPass::testVecAffineLoopNest() {

void VectorizerTestPass::runOnOperation() {
// Only support single block functions at this point.
FuncOp f = getOperation();
func::FuncOp f = getOperation();
if (!llvm::hasSingleElement(f))
return;

Expand Down
4 changes: 2 additions & 2 deletions mlir/test/lib/Dialect/DLTI/TestDataLayoutQuery.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -20,13 +20,13 @@ namespace {
/// attributes containing the results of data layout queries for operation
/// result types.
struct TestDataLayoutQuery
: public PassWrapper<TestDataLayoutQuery, OperationPass<FuncOp>> {
: public PassWrapper<TestDataLayoutQuery, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestDataLayoutQuery)

StringRef getArgument() const final { return "test-data-layout-query"; }
StringRef getDescription() const final { return "Test data layout queries"; }
void runOnOperation() override {
FuncOp func = getOperation();
func::FuncOp func = getOperation();
Builder builder(func.getContext());
const DataLayoutAnalysis &layouts = getAnalysis<DataLayoutAnalysis>();

Expand Down
2 changes: 1 addition & 1 deletion mlir/test/lib/Dialect/Func/TestDecomposeCallGraphTypes.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ struct TestDecomposeCallGraphTypes
});
target.addDynamicallyLegalOp<func::CallOp>(
[&](func::CallOp op) { return typeConverter.isLegal(op); });
target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
return typeConverter.isSignatureLegal(op.getFunctionType());
});

Expand Down
5 changes: 3 additions & 2 deletions mlir/test/lib/Dialect/Linalg/TestLinalgCodegenStrategy.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,8 @@ using namespace mlir::linalg;

namespace {
struct TestLinalgCodegenStrategy
: public PassWrapper<TestLinalgCodegenStrategy, OperationPass<FuncOp>> {
: public PassWrapper<TestLinalgCodegenStrategy,
OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLinalgCodegenStrategy)

StringRef getArgument() const final { return "test-linalg-codegen-strategy"; }
Expand Down Expand Up @@ -222,7 +223,7 @@ void TestLinalgCodegenStrategy::runStrategy(
.enableContractionLowering()
.enableTransferToSCFConversion());
// Created a nested OpPassManager and run.
FuncOp funcOp = getOperation();
func::FuncOp funcOp = getOperation();
OpPassManager dynamicPM("func.func");
strategy.configurePassPipeline(dynamicPM, funcOp.getContext(), runEnablePass);
if (failed(runPipeline(dynamicPM, funcOp)))
Expand Down
5 changes: 3 additions & 2 deletions mlir/test/lib/Dialect/Linalg/TestLinalgElementwiseFusion.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,8 @@ static bool setFusedOpOperandLimit(const OpResult &producer,

namespace {
struct TestLinalgElementwiseFusion
: public PassWrapper<TestLinalgElementwiseFusion, OperationPass<FuncOp>> {
: public PassWrapper<TestLinalgElementwiseFusion,
OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLinalgElementwiseFusion)

TestLinalgElementwiseFusion() = default;
Expand Down Expand Up @@ -96,7 +97,7 @@ struct TestLinalgElementwiseFusion

void runOnOperation() override {
MLIRContext *context = &this->getContext();
FuncOp funcOp = this->getOperation();
func::FuncOp funcOp = this->getOperation();

if (fuseGenericOps) {
RewritePatternSet fusionPatterns(context);
Expand Down
14 changes: 7 additions & 7 deletions mlir/test/lib/Dialect/Linalg/TestLinalgFusionTransforms.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -113,7 +113,7 @@ namespace {
template <LinalgTilingLoopType LoopType>
struct TestLinalgFusionTransforms
: public PassWrapper<TestLinalgFusionTransforms<LoopType>,
OperationPass<FuncOp>> {
OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLinalgFusionTransforms)

void getDependentDialects(DialectRegistry &registry) const override {
Expand All @@ -125,7 +125,7 @@ struct TestLinalgFusionTransforms

void runOnOperation() override {
MLIRContext *context = &this->getContext();
FuncOp funcOp = this->getOperation();
func::FuncOp funcOp = this->getOperation();
RewritePatternSet fusionPatterns(context);
Aliases alias;
LinalgDependenceGraph dependenceGraph =
Expand Down Expand Up @@ -177,7 +177,7 @@ struct TestLinalgFusionTransformsTiledLoops
};
} // namespace

static LogicalResult fuseLinalgOpsGreedily(FuncOp f) {
static LogicalResult fuseLinalgOpsGreedily(func::FuncOp f) {
OpBuilder b(f);
DenseSet<Operation *> eraseSet;

Expand Down Expand Up @@ -237,7 +237,7 @@ static LogicalResult fuseLinalgOpsGreedily(FuncOp f) {

namespace {
struct TestLinalgGreedyFusion
: public PassWrapper<TestLinalgGreedyFusion, OperationPass<FuncOp>> {
: public PassWrapper<TestLinalgGreedyFusion, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLinalgGreedyFusion)

void getDependentDialects(DialectRegistry &registry) const override {
Expand All @@ -255,7 +255,7 @@ struct TestLinalgGreedyFusion
patterns.add<ExtractSliceOfPadTensorSwapPattern>(context);
scf::populateSCFForLoopCanonicalizationPatterns(patterns);
FrozenRewritePatternSet frozenPatterns(std::move(patterns));
OpPassManager pm(FuncOp::getOperationName());
OpPassManager pm(func::FuncOp::getOperationName());
pm.addPass(createLoopInvariantCodeMotionPass());
pm.addPass(createCanonicalizerPass());
pm.addPass(createCSEPass());
Expand All @@ -271,7 +271,7 @@ struct TestLinalgGreedyFusion
/// testing.
struct TestLinalgTileAndFuseSequencePass
: public PassWrapper<TestLinalgTileAndFuseSequencePass,
OperationPass<FuncOp>> {
OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
TestLinalgTileAndFuseSequencePass)

Expand All @@ -294,7 +294,7 @@ struct TestLinalgTileAndFuseSequencePass
}

void runOnOperation() override {
FuncOp funcOp = getOperation();
func::FuncOp funcOp = getOperation();
auto &blocks = funcOp.getBody().getBlocks();
if (!llvm::hasSingleElement(blocks)) {
return;
Expand Down
2 changes: 1 addition & 1 deletion mlir/test/lib/Dialect/Linalg/TestLinalgHoisting.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ using namespace mlir::linalg;

namespace {
struct TestLinalgHoisting
: public PassWrapper<TestLinalgHoisting, OperationPass<FuncOp>> {
: public PassWrapper<TestLinalgHoisting, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLinalgHoisting)

TestLinalgHoisting() = default;
Expand Down
24 changes: 12 additions & 12 deletions mlir/test/lib/Dialect/Linalg/TestLinalgTransforms.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ using namespace mlir::linalg;

namespace {
struct TestLinalgTransforms
: public PassWrapper<TestLinalgTransforms, OperationPass<FuncOp>> {
: public PassWrapper<TestLinalgTransforms, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestLinalgTransforms)

TestLinalgTransforms() = default;
Expand Down Expand Up @@ -142,7 +142,7 @@ struct TestLinalgTransforms
};
} // namespace

static void applyPatterns(FuncOp funcOp) {
static void applyPatterns(func::FuncOp funcOp) {
MLIRContext *ctx = funcOp.getContext();
RewritePatternSet patterns(ctx);

Expand Down Expand Up @@ -288,7 +288,7 @@ static void applyPatterns(FuncOp funcOp) {
}

static void fillL1TilingAndMatmulToVectorPatterns(
FuncOp funcOp, StringRef startMarker,
func::FuncOp funcOp, StringRef startMarker,
SmallVectorImpl<RewritePatternSet> &patternsVector) {
MLIRContext *ctx = funcOp.getContext();
patternsVector.emplace_back(
Expand Down Expand Up @@ -531,7 +531,7 @@ static void fillTileFuseAndDistributePatterns(MLIRContext *context,
}

static void
applyMatmulToVectorPatterns(FuncOp funcOp,
applyMatmulToVectorPatterns(func::FuncOp funcOp,
bool testMatmulToVectorPatterns1dTiling,
bool testMatmulToVectorPatterns2dTiling) {
MLIRContext *ctx = funcOp.getContext();
Expand Down Expand Up @@ -564,14 +564,14 @@ applyMatmulToVectorPatterns(FuncOp funcOp,
(void)applyStagedPatterns(funcOp, frozenStage1Patterns, stage2Patterns);
}

static void applyVectorTransferForwardingPatterns(FuncOp funcOp) {
static void applyVectorTransferForwardingPatterns(func::FuncOp funcOp) {
RewritePatternSet forwardPattern(funcOp.getContext());
forwardPattern.add<LinalgCopyVTRForwardingPattern>(funcOp.getContext());
forwardPattern.add<LinalgCopyVTWForwardingPattern>(funcOp.getContext());
(void)applyPatternsAndFoldGreedily(funcOp, std::move(forwardPattern));
}

static void applyLinalgToVectorPatterns(FuncOp funcOp) {
static void applyLinalgToVectorPatterns(func::FuncOp funcOp) {
RewritePatternSet patterns(funcOp.getContext());
auto *ctx = funcOp.getContext();
patterns.add<LinalgVectorizationPattern>(
Expand All @@ -583,25 +583,25 @@ static void applyLinalgToVectorPatterns(FuncOp funcOp) {
(void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
}

static void applyPadTensorToGenericPatterns(FuncOp funcOp) {
static void applyPadTensorToGenericPatterns(func::FuncOp funcOp) {
RewritePatternSet patterns(funcOp.getContext());
patterns.add<PadOpTransformationPattern>(funcOp.getContext());
(void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
}

static void applyGeneralizePadTensorPatterns(FuncOp funcOp) {
static void applyGeneralizePadTensorPatterns(func::FuncOp funcOp) {
RewritePatternSet patterns(funcOp.getContext());
patterns.add<GeneralizePadOpPattern>(funcOp.getContext());
(void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
}

static void applyExtractSliceOfPadTensorSwapPattern(FuncOp funcOp) {
static void applyExtractSliceOfPadTensorSwapPattern(func::FuncOp funcOp) {
RewritePatternSet patterns(funcOp.getContext());
patterns.add<ExtractSliceOfPadTensorSwapPattern>(funcOp.getContext());
(void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
}

static void applyTilePattern(FuncOp funcOp, const std::string &loopType,
static void applyTilePattern(func::FuncOp funcOp, const std::string &loopType,
ArrayRef<int64_t> tileSizes,
ArrayRef<int64_t> peeledLoops,
bool scalarizeDynamicDims) {
Expand All @@ -628,7 +628,7 @@ static void applyTilePattern(FuncOp funcOp, const std::string &loopType,
(void)applyPatternsAndFoldGreedily(funcOp, std::move(tilingPattern));
}

static void applySplitReduction(FuncOp funcOp) {
static void applySplitReduction(func::FuncOp funcOp) {
RewritePatternSet patterns(funcOp.getContext());
linalg::populateSplitReductionPattern(
patterns,
Expand All @@ -642,7 +642,7 @@ static void applySplitReduction(FuncOp funcOp) {
(void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
}

static void applyBubbleUpExtractSliceOpPattern(FuncOp funcOp) {
static void applyBubbleUpExtractSliceOpPattern(func::FuncOp funcOp) {
RewritePatternSet patterns(funcOp.getContext());
populateBubbleUpExtractSliceOpPatterns(patterns);
(void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
Expand Down
8 changes: 4 additions & 4 deletions mlir/test/lib/Dialect/SCF/TestSCFUtils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,15 +26,15 @@ using namespace mlir;

namespace {
struct TestSCFForUtilsPass
: public PassWrapper<TestSCFForUtilsPass, OperationPass<FuncOp>> {
: public PassWrapper<TestSCFForUtilsPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestSCFForUtilsPass)

StringRef getArgument() const final { return "test-scf-for-utils"; }
StringRef getDescription() const final { return "test scf.for utils"; }
explicit TestSCFForUtilsPass() = default;

void runOnOperation() override {
FuncOp func = getOperation();
func::FuncOp func = getOperation();
SmallVector<scf::ForOp, 4> toErase;

func.walk([&](Operation *fakeRead) {
Expand Down Expand Up @@ -70,7 +70,7 @@ struct TestSCFIfUtilsPass
int count = 0;
getOperation().walk([&](scf::IfOp ifOp) {
auto strCount = std::to_string(count++);
FuncOp thenFn, elseFn;
func::FuncOp thenFn, elseFn;
OpBuilder b(ifOp);
IRRewriter rewriter(b);
if (failed(outlineIfOp(rewriter, ifOp, &thenFn,
Expand Down Expand Up @@ -98,7 +98,7 @@ static const StringLiteral kTestPipeliningAnnotationIteration =
"__test_pipelining_iteration";

struct TestSCFPipeliningPass
: public PassWrapper<TestSCFPipeliningPass, OperationPass<FuncOp>> {
: public PassWrapper<TestSCFPipeliningPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestSCFPipeliningPass)

TestSCFPipeliningPass() = default;
Expand Down
6 changes: 3 additions & 3 deletions mlir/test/lib/Dialect/SPIRV/TestAvailability.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ using namespace mlir;
namespace {
/// A pass for testing SPIR-V op availability.
struct PrintOpAvailability
: public PassWrapper<PrintOpAvailability, OperationPass<FuncOp>> {
: public PassWrapper<PrintOpAvailability, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(PrintOpAvailability)

void runOnOperation() override;
Expand Down Expand Up @@ -106,7 +106,7 @@ void registerPrintSpirvAvailabilityPass() {
namespace {
/// A pass for testing SPIR-V op availability.
struct ConvertToTargetEnv
: public PassWrapper<ConvertToTargetEnv, OperationPass<FuncOp>> {
: public PassWrapper<ConvertToTargetEnv, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(ConvertToTargetEnv)

StringRef getArgument() const override { return "test-spirv-target-env"; }
Expand Down Expand Up @@ -149,7 +149,7 @@ struct ConvertToSubgroupBallot : public RewritePattern {

void ConvertToTargetEnv::runOnOperation() {
MLIRContext *context = &getContext();
FuncOp fn = getOperation();
func::FuncOp fn = getOperation();

auto targetEnv = fn.getOperation()
->getAttr(spirv::getTargetEnvAttrName())
Expand Down
4 changes: 2 additions & 2 deletions mlir/test/lib/Dialect/Shape/TestShapeFunctions.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ void ReportShapeFnPass::runOnOperation() {
return true;
}
if (auto symbol = op->getAttrOfType<SymbolRefAttr>(shapeFnId)) {
auto fn = cast<FuncOp>(SymbolTable::lookupSymbolIn(module, symbol));
auto fn = cast<func::FuncOp>(SymbolTable::lookupSymbolIn(module, symbol));
op->emitRemark() << "associated shape function: " << fn.getName();
return true;
}
Expand All @@ -71,7 +71,7 @@ void ReportShapeFnPass::runOnOperation() {
}
}

module.getBodyRegion().walk([&](FuncOp func) {
module.getBodyRegion().walk([&](func::FuncOp func) {
// Skip ops in the shape function library.
if (isa<shape::FunctionLibraryOp>(func->getParentOp()))
return;
Expand Down
4 changes: 2 additions & 2 deletions mlir/test/lib/Dialect/Test/TestOps.td
Original file line number Diff line number Diff line change
Expand Up @@ -317,7 +317,7 @@ def : Pat<(OpWithEnum ConstantAttr<TestEnumAttr,

def SymbolRefOp : TEST_Op<"symbol_ref_attr"> {
let arguments = (ins
Confined<FlatSymbolRefAttr, [ReferToOp<"FuncOp">]>:$symbol
Confined<FlatSymbolRefAttr, [ReferToOp<"func::FuncOp">]>:$symbol
);
}

Expand Down Expand Up @@ -903,7 +903,7 @@ def OpFuncRef : TEST_Op<"op_funcref"> {
let description = [{
The "test.op_funcref" is a test op with a reference to a function symbol.
}];
let builders = [OpBuilder<(ins "::mlir::FuncOp":$function)>];
let builders = [OpBuilder<(ins "::mlir::func::FuncOp":$function)>];
}

// Pattern add the argument plus a increasing static number hidden in
Expand Down
32 changes: 17 additions & 15 deletions mlir/test/lib/Dialect/Test/TestPatterns.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -148,7 +148,7 @@ struct FolderCommutativeOp2WithConstant
};

struct TestPatternDriver
: public PassWrapper<TestPatternDriver, OperationPass<FuncOp>> {
: public PassWrapper<TestPatternDriver, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestPatternDriver)

StringRef getArgument() const final { return "test-patterns"; }
Expand Down Expand Up @@ -176,7 +176,7 @@ namespace {
template <typename OpTy>
static void invokeCreateWithInferredReturnType(Operation *op) {
auto *context = op->getContext();
auto fop = op->getParentOfType<FuncOp>();
auto fop = op->getParentOfType<func::FuncOp>();
auto location = UnknownLoc::get(context);
OpBuilder b(op);
b.setInsertionPointAfter(op);
Expand Down Expand Up @@ -215,7 +215,7 @@ static void reifyReturnShape(Operation *op) {
}

struct TestReturnTypeDriver
: public PassWrapper<TestReturnTypeDriver, OperationPass<FuncOp>> {
: public PassWrapper<TestReturnTypeDriver, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestReturnTypeDriver)

void getDependentDialects(DialectRegistry &registry) const override {
Expand Down Expand Up @@ -257,7 +257,8 @@ struct TestReturnTypeDriver

namespace {
struct TestDerivedAttributeDriver
: public PassWrapper<TestDerivedAttributeDriver, OperationPass<FuncOp>> {
: public PassWrapper<TestDerivedAttributeDriver,
OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestDerivedAttributeDriver)

StringRef getArgument() const final { return "test-derived-attr"; }
Expand Down Expand Up @@ -690,8 +691,8 @@ struct TestLegalizePatternDriver
TestNestedOpCreationUndoRewrite, TestReplaceEraseOp,
TestCreateUnregisteredOp>(&getContext());
patterns.add<TestDropOpSignatureConversion>(&getContext(), converter);
mlir::populateFunctionOpInterfaceTypeConversionPattern<FuncOp>(patterns,
converter);
mlir::populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(
patterns, converter);
mlir::populateCallOpTypeConversionPattern(patterns, converter);

// Define the conversion target used for the test.
Expand All @@ -706,7 +707,7 @@ struct TestLegalizePatternDriver
return llvm::none_of(op.getOperandTypes(),
[](Type type) { return type.isF32(); });
});
target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
return converter.isSignatureLegal(op.getFunctionType()) &&
converter.isLegal(&op.getBody());
});
Expand All @@ -726,7 +727,7 @@ struct TestLegalizePatternDriver
});

// Check support for marking certain operations as recursively legal.
target.markOpRecursivelyLegal<FuncOp, ModuleOp>([](Operation *op) {
target.markOpRecursivelyLegal<func::FuncOp, ModuleOp>([](Operation *op) {
return static_cast<bool>(
op->getAttrOfType<UnitAttr>("test.recursively_legal"));
});
Expand Down Expand Up @@ -871,7 +872,7 @@ struct TestRemapValueInRegion
};

struct TestRemappedValue
: public mlir::PassWrapper<TestRemappedValue, OperationPass<FuncOp>> {
: public mlir::PassWrapper<TestRemappedValue, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestRemappedValue)

StringRef getArgument() const final { return "test-remapped-value"; }
Expand All @@ -888,7 +889,7 @@ struct TestRemappedValue
patterns.add<TestRemapValueInRegion>(typeConverter, &getContext());

mlir::ConversionTarget target(getContext());
target.addLegalOp<ModuleOp, FuncOp, TestReturnOp>();
target.addLegalOp<ModuleOp, func::FuncOp, TestReturnOp>();

// Expect the type_producer/type_consumer operations to only operate on f64.
target.addDynamicallyLegalOp<TestTypeProducerOp>(
Expand Down Expand Up @@ -931,7 +932,8 @@ struct RemoveTestDialectOps : public RewritePattern {
};

struct TestUnknownRootOpDriver
: public mlir::PassWrapper<TestUnknownRootOpDriver, OperationPass<FuncOp>> {
: public mlir::PassWrapper<TestUnknownRootOpDriver,
OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestUnknownRootOpDriver)

StringRef getArgument() const final {
Expand Down Expand Up @@ -1142,7 +1144,7 @@ struct TestTypeConversionDriver
(recursiveType &&
recursiveType.getName() == "outer_converted_type");
});
target.addDynamicallyLegalOp<FuncOp>([&](FuncOp op) {
target.addDynamicallyLegalOp<func::FuncOp>([&](func::FuncOp op) {
return converter.isSignatureLegal(op.getFunctionType()) &&
converter.isLegal(&op.getBody());
});
Expand All @@ -1162,8 +1164,8 @@ struct TestTypeConversionDriver
TestTestSignatureConversionNoConverter>(converter,
&getContext());
patterns.add<TestTypeConversionAnotherProducer>(&getContext());
mlir::populateFunctionOpInterfaceTypeConversionPattern<FuncOp>(patterns,
converter);
mlir::populateFunctionOpInterfaceTypeConversionPattern<func::FuncOp>(
patterns, converter);

if (failed(applyPartialConversion(getOperation(), target,
std::move(patterns))))
Expand Down Expand Up @@ -1312,7 +1314,7 @@ struct TestMergeBlocksPatternDriver
patterns.add<TestMergeBlock, TestUndoBlocksMerge, TestMergeSingleBlockOps>(
context);
ConversionTarget target(*context);
target.addLegalOp<FuncOp, ModuleOp, TerminatorOp, TestBranchOp,
target.addLegalOp<func::FuncOp, ModuleOp, TerminatorOp, TestBranchOp,
TestTypeConsumerOp, TestTypeProducerOp, TestReturnOp>();
target.addIllegalOp<ILLegalOpF>();

Expand Down
2 changes: 1 addition & 1 deletion mlir/test/lib/Dialect/Test/TestTraits.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ OpFoldResult TestInvolutionTraitSuccesfulOperationFolderOp::fold(

namespace {
struct TestTraitFolder
: public PassWrapper<TestTraitFolder, OperationPass<FuncOp>> {
: public PassWrapper<TestTraitFolder, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestTraitFolder)

StringRef getArgument() const final { return "test-trait-folder"; }
Expand Down
2 changes: 1 addition & 1 deletion mlir/test/lib/Dialect/Tosa/TosaTestPasses.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -178,7 +178,7 @@ ConvertTosaConv2DOp::matchAndRewrite(Operation *op,
namespace {

struct TosaTestQuantUtilAPI
: public PassWrapper<TosaTestQuantUtilAPI, OperationPass<FuncOp>> {
: public PassWrapper<TosaTestQuantUtilAPI, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TosaTestQuantUtilAPI)

StringRef getArgument() const final { return PASS_NAME; }
Expand Down
42 changes: 24 additions & 18 deletions mlir/test/lib/Dialect/Vector/TestVectorTransforms.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,8 @@ using namespace mlir::vector;
namespace {

struct TestVectorToVectorLowering
: public PassWrapper<TestVectorToVectorLowering, OperationPass<FuncOp>> {
: public PassWrapper<TestVectorToVectorLowering,
OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorToVectorLowering)

TestVectorToVectorLowering() = default;
Expand Down Expand Up @@ -104,7 +105,8 @@ struct TestVectorToVectorLowering
};

struct TestVectorContractionLowering
: public PassWrapper<TestVectorContractionLowering, OperationPass<FuncOp>> {
: public PassWrapper<TestVectorContractionLowering,
OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorContractionLowering)

StringRef getArgument() const final {
Expand Down Expand Up @@ -179,7 +181,8 @@ struct TestVectorContractionLowering
};

struct TestVectorTransposeLowering
: public PassWrapper<TestVectorTransposeLowering, OperationPass<FuncOp>> {
: public PassWrapper<TestVectorTransposeLowering,
OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorTransposeLowering)

StringRef getArgument() const final {
Expand Down Expand Up @@ -253,7 +256,8 @@ struct TestVectorTransposeLowering
};

struct TestVectorUnrollingPatterns
: public PassWrapper<TestVectorUnrollingPatterns, OperationPass<FuncOp>> {
: public PassWrapper<TestVectorUnrollingPatterns,
OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorUnrollingPatterns)

StringRef getArgument() const final {
Expand Down Expand Up @@ -328,7 +332,8 @@ struct TestVectorUnrollingPatterns
};

struct TestVectorDistributePatterns
: public PassWrapper<TestVectorDistributePatterns, OperationPass<FuncOp>> {
: public PassWrapper<TestVectorDistributePatterns,
OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorDistributePatterns)

StringRef getArgument() const final {
Expand All @@ -352,7 +357,7 @@ struct TestVectorDistributePatterns
void runOnOperation() override {
MLIRContext *ctx = &getContext();
RewritePatternSet patterns(ctx);
FuncOp func = getOperation();
func::FuncOp func = getOperation();
func.walk([&](arith::AddFOp op) {
OpBuilder builder(op);
if (auto vecType = op.getType().dyn_cast<VectorType>()) {
Expand Down Expand Up @@ -387,7 +392,8 @@ struct TestVectorDistributePatterns
};

struct TestVectorToLoopPatterns
: public PassWrapper<TestVectorToLoopPatterns, OperationPass<FuncOp>> {
: public PassWrapper<TestVectorToLoopPatterns,
OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorToLoopPatterns)

StringRef getArgument() const final { return "test-vector-to-forloop"; }
Expand All @@ -408,7 +414,7 @@ struct TestVectorToLoopPatterns
void runOnOperation() override {
MLIRContext *ctx = &getContext();
RewritePatternSet patterns(ctx);
FuncOp func = getOperation();
func::FuncOp func = getOperation();
func.walk([&](arith::AddFOp op) {
// Check that the operation type can be broken down into a loop.
VectorType type = op.getType().dyn_cast<VectorType>();
Expand Down Expand Up @@ -447,7 +453,7 @@ struct TestVectorToLoopPatterns

struct TestVectorTransferUnrollingPatterns
: public PassWrapper<TestVectorTransferUnrollingPatterns,
OperationPass<FuncOp>> {
OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
TestVectorTransferUnrollingPatterns)

Expand Down Expand Up @@ -479,7 +485,7 @@ struct TestVectorTransferUnrollingPatterns

struct TestVectorTransferFullPartialSplitPatterns
: public PassWrapper<TestVectorTransferFullPartialSplitPatterns,
OperationPass<FuncOp>> {
OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
TestVectorTransferFullPartialSplitPatterns)

Expand Down Expand Up @@ -519,7 +525,7 @@ struct TestVectorTransferFullPartialSplitPatterns
};

struct TestVectorTransferOpt
: public PassWrapper<TestVectorTransferOpt, OperationPass<FuncOp>> {
: public PassWrapper<TestVectorTransferOpt, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorTransferOpt)

StringRef getArgument() const final { return "test-vector-transferop-opt"; }
Expand All @@ -531,7 +537,7 @@ struct TestVectorTransferOpt

struct TestVectorTransferLoweringPatterns
: public PassWrapper<TestVectorTransferLoweringPatterns,
OperationPass<FuncOp>> {
OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
TestVectorTransferLoweringPatterns)

Expand All @@ -554,7 +560,7 @@ struct TestVectorTransferLoweringPatterns

struct TestVectorMultiReductionLoweringPatterns
: public PassWrapper<TestVectorMultiReductionLoweringPatterns,
OperationPass<FuncOp>> {
OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
TestVectorMultiReductionLoweringPatterns)

Expand Down Expand Up @@ -588,7 +594,7 @@ struct TestVectorMultiReductionLoweringPatterns

struct TestVectorTransferCollapseInnerMostContiguousDims
: public PassWrapper<TestVectorTransferCollapseInnerMostContiguousDims,
OperationPass<FuncOp>> {
OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
TestVectorTransferCollapseInnerMostContiguousDims)

Expand Down Expand Up @@ -618,7 +624,7 @@ struct TestVectorTransferCollapseInnerMostContiguousDims

struct TestVectorReduceToContractPatternsPatterns
: public PassWrapper<TestVectorReduceToContractPatternsPatterns,
OperationPass<FuncOp>> {
OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
TestVectorReduceToContractPatternsPatterns)

Expand All @@ -638,7 +644,7 @@ struct TestVectorReduceToContractPatternsPatterns

struct TestVectorTransferDropUnitDimsPatterns
: public PassWrapper<TestVectorTransferDropUnitDimsPatterns,
OperationPass<FuncOp>> {
OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
TestVectorTransferDropUnitDimsPatterns)

Expand All @@ -657,7 +663,7 @@ struct TestVectorTransferDropUnitDimsPatterns

struct TestFlattenVectorTransferPatterns
: public PassWrapper<TestFlattenVectorTransferPatterns,
OperationPass<FuncOp>> {
OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(
TestFlattenVectorTransferPatterns)

Expand All @@ -679,7 +685,7 @@ struct TestFlattenVectorTransferPatterns
};

struct TestVectorScanLowering
: public PassWrapper<TestVectorScanLowering, OperationPass<FuncOp>> {
: public PassWrapper<TestVectorScanLowering, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestVectorScanLowering)

StringRef getArgument() const final { return "test-vector-scan-lowering"; }
Expand Down
2 changes: 1 addition & 1 deletion mlir/test/lib/IR/TestPrintInvalid.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ struct TestPrintInvalidPass
void runOnOperation() override {
Location loc = getOperation().getLoc();
OpBuilder builder(getOperation().getBodyRegion());
auto funcOp = builder.create<FuncOp>(
auto funcOp = builder.create<func::FuncOp>(
loc, "test", FunctionType::get(getOperation().getContext(), {}, {}));
funcOp.addEntryBlock();
// The created function is invalid because there is no return op.
Expand Down
8 changes: 4 additions & 4 deletions mlir/test/lib/IR/TestSlicing.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -25,12 +25,12 @@ using namespace mlir;
/// with name being the function name and a `suffix`.
static LogicalResult createBackwardSliceFunction(Operation *op,
StringRef suffix) {
FuncOp parentFuncOp = op->getParentOfType<FuncOp>();
func::FuncOp parentFuncOp = op->getParentOfType<func::FuncOp>();
OpBuilder builder(parentFuncOp);
Location loc = op->getLoc();
std::string clonedFuncOpName = parentFuncOp.getName().str() + suffix.str();
FuncOp clonedFuncOp = builder.create<FuncOp>(loc, clonedFuncOpName,
parentFuncOp.getFunctionType());
func::FuncOp clonedFuncOp = builder.create<func::FuncOp>(
loc, clonedFuncOpName, parentFuncOp.getFunctionType());
BlockAndValueMapping mapper;
builder.setInsertionPointToEnd(clonedFuncOp.addEntryBlock());
for (const auto &arg : enumerate(parentFuncOp.getArguments()))
Expand Down Expand Up @@ -61,7 +61,7 @@ struct SliceAnalysisTestPass

void SliceAnalysisTestPass::runOnOperation() {
ModuleOp module = getOperation();
auto funcOps = module.getOps<FuncOp>();
auto funcOps = module.getOps<func::FuncOp>();
unsigned opNum = 0;
for (auto funcOp : funcOps) {
// TODO: For now this is just looking for Linalg ops. It can be generalized
Expand Down
6 changes: 3 additions & 3 deletions mlir/test/lib/IR/TestSymbolUses.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ struct SymbolUsesPass
return "Test detection of symbol uses";
}
WalkResult operateOnSymbol(Operation *symbol, ModuleOp module,
SmallVectorImpl<FuncOp> &deadFunctions) {
SmallVectorImpl<func::FuncOp> &deadFunctions) {
// Test computing uses on a non symboltable op.
Optional<SymbolTable::UseRange> symbolUses =
SymbolTable::getSymbolUses(symbol);
Expand All @@ -42,7 +42,7 @@ struct SymbolUsesPass

// Test the functionality of symbolKnownUseEmpty.
if (SymbolTable::symbolKnownUseEmpty(symbol, &module.getBodyRegion())) {
FuncOp funcSymbol = dyn_cast<FuncOp>(symbol);
func::FuncOp funcSymbol = dyn_cast<func::FuncOp>(symbol);
if (funcSymbol && funcSymbol.isExternal())
deadFunctions.push_back(funcSymbol);

Expand Down Expand Up @@ -70,7 +70,7 @@ struct SymbolUsesPass
auto module = getOperation();

// Walk nested symbols.
SmallVector<FuncOp, 4> deadFunctions;
SmallVector<func::FuncOp, 4> deadFunctions;
module.getBodyRegion().walk([&](Operation *nestedOp) {
if (isa<SymbolOpInterface>(nestedOp))
return operateOnSymbol(nestedOp, module, deadFunctions);
Expand Down
6 changes: 3 additions & 3 deletions mlir/test/lib/IR/TestTypes.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ using namespace test;

namespace {
struct TestRecursiveTypesPass
: public PassWrapper<TestRecursiveTypesPass, OperationPass<FuncOp>> {
: public PassWrapper<TestRecursiveTypesPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestRecursiveTypesPass)

LogicalResult createIRWithTypes();
Expand All @@ -25,7 +25,7 @@ struct TestRecursiveTypesPass
return "Test support for recursive types";
}
void runOnOperation() override {
FuncOp func = getOperation();
func::FuncOp func = getOperation();

// Just make sure recursive types are printed and parsed.
if (func.getName() == "roundtrip")
Expand All @@ -47,7 +47,7 @@ struct TestRecursiveTypesPass

LogicalResult TestRecursiveTypesPass::createIRWithTypes() {
MLIRContext *ctx = &getContext();
FuncOp func = getOperation();
func::FuncOp func = getOperation();
auto type = TestRecursiveType::get(ctx, "some_long_and_unique_name");
if (failed(type.setBody(type)))
return func.emitError("expected to be able to set the type body");
Expand Down
8 changes: 4 additions & 4 deletions mlir/test/lib/Pass/TestPassManager.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ struct TestModulePass
}
};
struct TestFunctionPass
: public PassWrapper<TestFunctionPass, OperationPass<FuncOp>> {
: public PassWrapper<TestFunctionPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestFunctionPass)

void runOnOperation() final {}
Expand All @@ -50,7 +50,7 @@ struct TestInterfacePass
}
};
struct TestOptionsPass
: public PassWrapper<TestOptionsPass, OperationPass<FuncOp>> {
: public PassWrapper<TestOptionsPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestOptionsPass)

struct Options : public PassPipelineOptions<Options> {
Expand Down Expand Up @@ -155,11 +155,11 @@ static void testNestedPipeline(OpPassManager &pm) {
auto &modulePM = pm.nest<ModuleOp>();
modulePM.addPass(std::make_unique<TestModulePass>());
/// A nested function pass.
auto &nestedFunctionPM = modulePM.nest<FuncOp>();
auto &nestedFunctionPM = modulePM.nest<func::FuncOp>();
nestedFunctionPM.addPass(std::make_unique<TestFunctionPass>());

// Nest a function pipeline that contains a single pass.
auto &functionPM = pm.nest<FuncOp>();
auto &functionPM = pm.nest<func::FuncOp>();
functionPM.addPass(std::make_unique<TestFunctionPass>());
}

Expand Down
2 changes: 1 addition & 1 deletion mlir/test/lib/Transforms/TestControlFlowSink.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ namespace {
/// This pass will sink ops named `test.sink_me` and tag them with an attribute
/// `was_sunk` into the first region of `test.sink_target` ops.
struct TestControlFlowSinkPass
: public PassWrapper<TestControlFlowSinkPass, OperationPass<FuncOp>> {
: public PassWrapper<TestControlFlowSinkPass, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestControlFlowSinkPass)

/// Get the command-line argument of the test pass.
Expand Down
2 changes: 1 addition & 1 deletion mlir/test/lib/Transforms/TestInlining.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ using namespace mlir;
using namespace test;

namespace {
struct Inliner : public PassWrapper<Inliner, OperationPass<FuncOp>> {
struct Inliner : public PassWrapper<Inliner, OperationPass<func::FuncOp>> {
MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(Inliner)

StringRef getArgument() const final { return "test-inline"; }
Expand Down
Loading