-
Notifications
You must be signed in to change notification settings - Fork 14.9k
[flang][mlir] Migrate to free create functions. NFC. #164657
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Conversation
|
@llvm/pr-subscribers-flang-fir-hlfir @llvm/pr-subscribers-flang-codegen Author: Jakub Kuderski (kuhar) Changes: See https://discourse.llvm.org/t/psa-opty-create-now-with-100-more-tab-complete/87339. I plan to mark these as deprecated in #164649. Patch is 34.02 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/164657.diff 11 Files Affected:
diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp
index acb8e114c167b..a516a44204cac 100644
--- a/flang/lib/Lower/Bridge.cpp
+++ b/flang/lib/Lower/Bridge.cpp
@@ -1766,7 +1766,7 @@ class FirConverter : public Fortran::lower::AbstractConverter {
// to a crash due to a block with no terminator. See issue #126452.
mlir::FunctionType funcType = builder->getFunction().getFunctionType();
mlir::Type resultType = funcType.getResult(0);
- mlir::Value undefResult = builder->create<fir::UndefOp>(loc, resultType);
+ mlir::Value undefResult = fir::UndefOp::create(*builder, loc, resultType);
genExitRoutine(false, undefResult);
return;
}
diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp
index a49961cc233c6..71067283d13f7 100644
--- a/flang/lib/Lower/OpenMP/OpenMP.cpp
+++ b/flang/lib/Lower/OpenMP/OpenMP.cpp
@@ -2059,37 +2059,38 @@ static void genCanonicalLoopNest(
// Start lowering
mlir::Value zero = firOpBuilder.createIntegerConstant(loc, loopVarType, 0);
mlir::Value one = firOpBuilder.createIntegerConstant(loc, loopVarType, 1);
- mlir::Value isDownwards = firOpBuilder.create<mlir::arith::CmpIOp>(
- loc, mlir::arith::CmpIPredicate::slt, loopStepVar, zero);
+ mlir::Value isDownwards = mlir::arith::CmpIOp::create(
+ firOpBuilder, loc, mlir::arith::CmpIPredicate::slt, loopStepVar, zero);
// Ensure we are counting upwards. If not, negate step and swap lb and ub.
mlir::Value negStep =
- firOpBuilder.create<mlir::arith::SubIOp>(loc, zero, loopStepVar);
- mlir::Value incr = firOpBuilder.create<mlir::arith::SelectOp>(
- loc, isDownwards, negStep, loopStepVar);
- mlir::Value lb = firOpBuilder.create<mlir::arith::SelectOp>(
- loc, isDownwards, loopUBVar, loopLBVar);
- mlir::Value ub = firOpBuilder.create<mlir::arith::SelectOp>(
- loc, isDownwards, loopLBVar, loopUBVar);
+ mlir::arith::SubIOp::create(firOpBuilder, loc, zero, loopStepVar);
+ mlir::Value incr = mlir::arith::SelectOp::create(
+ firOpBuilder, loc, isDownwards, negStep, loopStepVar);
+ mlir::Value lb = mlir::arith::SelectOp::create(
+ firOpBuilder, loc, isDownwards, loopUBVar, loopLBVar);
+ mlir::Value ub = mlir::arith::SelectOp::create(
+ firOpBuilder, loc, isDownwards, loopLBVar, loopUBVar);
// Compute the trip count assuming lb <= ub. This guarantees that the result
// is non-negative and we can use unsigned arithmetic.
- mlir::Value span = firOpBuilder.create<mlir::arith::SubIOp>(
- loc, ub, lb, ::mlir::arith::IntegerOverflowFlags::nuw);
+ mlir::Value span = mlir::arith::SubIOp::create(
+ firOpBuilder, loc, ub, lb, ::mlir::arith::IntegerOverflowFlags::nuw);
mlir::Value tcMinusOne =
- firOpBuilder.create<mlir::arith::DivUIOp>(loc, span, incr);
- mlir::Value tcIfLooping = firOpBuilder.create<mlir::arith::AddIOp>(
- loc, tcMinusOne, one, ::mlir::arith::IntegerOverflowFlags::nuw);
+ mlir::arith::DivUIOp::create(firOpBuilder, loc, span, incr);
+ mlir::Value tcIfLooping =
+ mlir::arith::AddIOp::create(firOpBuilder, loc, tcMinusOne, one,
+ ::mlir::arith::IntegerOverflowFlags::nuw);
// Fall back to 0 if lb > ub
- mlir::Value isZeroTC = firOpBuilder.create<mlir::arith::CmpIOp>(
- loc, mlir::arith::CmpIPredicate::slt, ub, lb);
- mlir::Value tripcount = firOpBuilder.create<mlir::arith::SelectOp>(
- loc, isZeroTC, zero, tcIfLooping);
+ mlir::Value isZeroTC = mlir::arith::CmpIOp::create(
+ firOpBuilder, loc, mlir::arith::CmpIPredicate::slt, ub, lb);
+ mlir::Value tripcount = mlir::arith::SelectOp::create(
+ firOpBuilder, loc, isZeroTC, zero, tcIfLooping);
tripcounts.push_back(tripcount);
// Create the CLI handle.
- auto newcli = firOpBuilder.create<mlir::omp::NewCliOp>(loc);
+ auto newcli = mlir::omp::NewCliOp::create(firOpBuilder, loc);
mlir::Value cli = newcli.getResult();
clis.push_back(cli);
@@ -2122,10 +2123,10 @@ static void genCanonicalLoopNest(
"Expecting all block args to have been collected by now");
for (auto j : llvm::seq<size_t>(numLoops)) {
mlir::Value natIterNum = fir::getBase(blockArgs[j]);
- mlir::Value scaled = firOpBuilder.create<mlir::arith::MulIOp>(
- loc, natIterNum, loopStepVars[j]);
- mlir::Value userVal = firOpBuilder.create<mlir::arith::AddIOp>(
- loc, loopLBVars[j], scaled);
+ mlir::Value scaled = mlir::arith::MulIOp::create(
+ firOpBuilder, loc, natIterNum, loopStepVars[j]);
+ mlir::Value userVal = mlir::arith::AddIOp::create(
+ firOpBuilder, loc, loopLBVars[j], scaled);
mlir::OpBuilder::InsertPoint insPt =
firOpBuilder.saveInsertionPoint();
@@ -2198,9 +2199,9 @@ static void genTileOp(Fortran::lower::AbstractConverter &converter,
gridGeneratees.reserve(numLoops);
intratileGeneratees.reserve(numLoops);
for ([[maybe_unused]] auto i : llvm::seq<int>(0, sizesClause.sizes.size())) {
- auto gridCLI = firOpBuilder.create<mlir::omp::NewCliOp>(loc);
+ auto gridCLI = mlir::omp::NewCliOp::create(firOpBuilder, loc);
gridGeneratees.push_back(gridCLI.getResult());
- auto intratileCLI = firOpBuilder.create<mlir::omp::NewCliOp>(loc);
+ auto intratileCLI = mlir::omp::NewCliOp::create(firOpBuilder, loc);
intratileGeneratees.push_back(intratileCLI.getResult());
}
@@ -2209,8 +2210,8 @@ static void genTileOp(Fortran::lower::AbstractConverter &converter,
generatees.append(gridGeneratees);
generatees.append(intratileGeneratees);
- firOpBuilder.create<mlir::omp::TileOp>(loc, generatees, applyees,
- sizesClause.sizes);
+ mlir::omp::TileOp::create(firOpBuilder, loc, generatees, applyees,
+ sizesClause.sizes);
}
static void genUnrollOp(Fortran::lower::AbstractConverter &converter,
diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
index e71f4e3cee49c..478ab151b96d0 100644
--- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp
+++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
@@ -1151,7 +1151,7 @@ struct AllocMemOpConversion : public fir::FIROpConversion<fir::AllocMemOp> {
mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, llvmObjectTy);
if (auto scaleSize =
fir::genAllocationScaleSize(loc, heap.getInType(), ity, rewriter))
- size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
+ size = mlir::LLVM::MulOp::create(rewriter, loc, ity, size, scaleSize);
for (mlir::Value opnd : adaptor.getOperands())
size = mlir::LLVM::MulOp::create(rewriter, loc, ity, size,
integerCast(loc, rewriter, ity, opnd));
diff --git a/flang/lib/Optimizer/CodeGen/CodeGenOpenMP.cpp b/flang/lib/Optimizer/CodeGen/CodeGenOpenMP.cpp
index 381b2a29c517a..f74d635d50a75 100644
--- a/flang/lib/Optimizer/CodeGen/CodeGenOpenMP.cpp
+++ b/flang/lib/Optimizer/CodeGen/CodeGenOpenMP.cpp
@@ -242,10 +242,11 @@ struct TargetAllocMemOpConversion
loc, llvmObjectTy, ity, rewriter, lowerTy().getDataLayout());
if (auto scaleSize = fir::genAllocationScaleSize(
loc, allocmemOp.getInType(), ity, rewriter))
- size = rewriter.create<mlir::LLVM::MulOp>(loc, ity, size, scaleSize);
+ size = mlir::LLVM::MulOp::create(rewriter, loc, ity, size, scaleSize);
for (mlir::Value opnd : adaptor.getOperands().drop_front())
- size = rewriter.create<mlir::LLVM::MulOp>(
- loc, ity, size, integerCast(lowerTy(), loc, rewriter, ity, opnd));
+ size = mlir::LLVM::MulOp::create(
+ rewriter, loc, ity, size,
+ integerCast(lowerTy(), loc, rewriter, ity, opnd));
auto mallocTyWidth = lowerTy().getIndexTypeBitwidth();
auto mallocTy =
mlir::IntegerType::get(rewriter.getContext(), mallocTyWidth);
diff --git a/flang/lib/Optimizer/OpenACC/Transforms/ACCRecipeBufferization.cpp b/flang/lib/Optimizer/OpenACC/Transforms/ACCRecipeBufferization.cpp
index 4840a999ecd27..0d135a94588e4 100644
--- a/flang/lib/Optimizer/OpenACC/Transforms/ACCRecipeBufferization.cpp
+++ b/flang/lib/Optimizer/OpenACC/Transforms/ACCRecipeBufferization.cpp
@@ -39,13 +39,13 @@ class BufferizeInterface {
static mlir::Operation *load(mlir::OpBuilder &builder, mlir::Location loc,
mlir::Value value) {
- return builder.create<fir::LoadOp>(loc, value);
+ return fir::LoadOp::create(builder, loc, value);
}
static mlir::Value placeInMemory(mlir::OpBuilder &builder, mlir::Location loc,
mlir::Value value) {
- auto alloca = builder.create<fir::AllocaOp>(loc, value.getType());
- builder.create<fir::StoreOp>(loc, value, alloca);
+ auto alloca = fir::AllocaOp::create(builder, loc, value.getType());
+ fir::StoreOp::create(builder, loc, value, alloca);
return alloca;
}
};
diff --git a/flang/lib/Optimizer/OpenMP/AutomapToTargetData.cpp b/flang/lib/Optimizer/OpenMP/AutomapToTargetData.cpp
index 817434ff3dc30..5793d46a192a7 100644
--- a/flang/lib/Optimizer/OpenMP/AutomapToTargetData.cpp
+++ b/flang/lib/Optimizer/OpenMP/AutomapToTargetData.cpp
@@ -130,8 +130,8 @@ class AutomapToTargetDataPass
builder.getBoolAttr(false));
clauses.mapVars.push_back(mapInfo);
isa<fir::StoreOp>(memOp)
- ? builder.create<omp::TargetEnterDataOp>(memOp.getLoc(), clauses)
- : builder.create<omp::TargetExitDataOp>(memOp.getLoc(), clauses);
+ ? omp::TargetEnterDataOp::create(builder, memOp.getLoc(), clauses)
+ : omp::TargetExitDataOp::create(builder, memOp.getLoc(), clauses);
};
for (fir::GlobalOp globalOp : automapGlobals) {
diff --git a/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp b/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp
index 65a23be243716..1229018bd9b3e 100644
--- a/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp
+++ b/flang/lib/Optimizer/OpenMP/DoConcurrentConversion.cpp
@@ -595,7 +595,7 @@ class DoConcurrentConversion
mlir::omp::TargetOperands &clauseOps,
mlir::omp::LoopNestOperands &loopNestClauseOps,
const LiveInShapeInfoMap &liveInShapeInfoMap) const {
- auto targetOp = rewriter.create<mlir::omp::TargetOp>(loc, clauseOps);
+ auto targetOp = mlir::omp::TargetOp::create(rewriter, loc, clauseOps);
auto argIface = llvm::cast<mlir::omp::BlockArgOpenMPOpInterface>(*targetOp);
mlir::Region &region = targetOp.getRegion();
@@ -672,7 +672,7 @@ class DoConcurrentConversion
// temporary.
Fortran::utils::openmp::cloneOrMapRegionOutsiders(builder, targetOp);
rewriter.setInsertionPoint(
- rewriter.create<mlir::omp::TerminatorOp>(targetOp.getLoc()));
+ mlir::omp::TerminatorOp::create(rewriter, targetOp.getLoc()));
return targetOp;
}
@@ -715,8 +715,8 @@ class DoConcurrentConversion
auto shapeShiftType = fir::ShapeShiftType::get(
builder.getContext(), shapeShiftOperands.size() / 2);
- return builder.create<fir::ShapeShiftOp>(
- liveInArg.getLoc(), shapeShiftType, shapeShiftOperands);
+ return fir::ShapeShiftOp::create(builder, liveInArg.getLoc(),
+ shapeShiftType, shapeShiftOperands);
}
llvm::SmallVector<mlir::Value> shapeOperands;
@@ -728,11 +728,11 @@ class DoConcurrentConversion
++shapeIdx;
}
- return builder.create<fir::ShapeOp>(liveInArg.getLoc(), shapeOperands);
+ return fir::ShapeOp::create(builder, liveInArg.getLoc(), shapeOperands);
}();
- return builder.create<hlfir::DeclareOp>(liveInArg.getLoc(), liveInArg,
- liveInName, shape);
+ return hlfir::DeclareOp::create(builder, liveInArg.getLoc(), liveInArg,
+ liveInName, shape);
}
mlir::omp::TeamsOp genTeamsOp(mlir::ConversionPatternRewriter &rewriter,
@@ -742,13 +742,13 @@ class DoConcurrentConversion
genReductions(rewriter, mapper, loop, teamsOps);
mlir::Location loc = loop.getLoc();
- auto teamsOp = rewriter.create<mlir::omp::TeamsOp>(loc, teamsOps);
+ auto teamsOp = mlir::omp::TeamsOp::create(rewriter, loc, teamsOps);
Fortran::common::openmp::EntryBlockArgs teamsArgs;
teamsArgs.reduction.vars = teamsOps.reductionVars;
Fortran::common::openmp::genEntryBlock(rewriter, teamsArgs,
teamsOp.getRegion());
- rewriter.setInsertionPoint(rewriter.create<mlir::omp::TerminatorOp>(loc));
+ rewriter.setInsertionPoint(mlir::omp::TerminatorOp::create(rewriter, loc));
for (auto [loopVar, teamsArg] : llvm::zip_equal(
loop.getReduceVars(), teamsOp.getRegion().getArguments())) {
@@ -761,8 +761,8 @@ class DoConcurrentConversion
mlir::omp::DistributeOp
genDistributeOp(mlir::Location loc,
mlir::ConversionPatternRewriter &rewriter) const {
- auto distOp = rewriter.create<mlir::omp::DistributeOp>(
- loc, /*clauses=*/mlir::omp::DistributeOperands{});
+ auto distOp = mlir::omp::DistributeOp::create(
+ rewriter, loc, /*clauses=*/mlir::omp::DistributeOperands{});
rewriter.createBlock(&distOp.getRegion());
return distOp;
diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkdistribute.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkdistribute.cpp
index 8a9b383ec1356..7b61539984232 100644
--- a/flang/lib/Optimizer/OpenMP/LowerWorkdistribute.cpp
+++ b/flang/lib/Optimizer/OpenMP/LowerWorkdistribute.cpp
@@ -282,14 +282,14 @@ fissionWorkdistribute(omp::WorkdistributeOp workdistribute) {
&newTeams.getRegion(), newTeams.getRegion().begin(), {}, {});
for (auto arg : teamsBlock->getArguments())
newTeamsBlock->addArgument(arg.getType(), arg.getLoc());
- auto newWorkdistribute = rewriter.create<omp::WorkdistributeOp>(loc);
- rewriter.create<omp::TerminatorOp>(loc);
+ auto newWorkdistribute = omp::WorkdistributeOp::create(rewriter, loc);
+ omp::TerminatorOp::create(rewriter, loc);
rewriter.createBlock(&newWorkdistribute.getRegion(),
newWorkdistribute.getRegion().begin(), {}, {});
auto *cloned = rewriter.clone(*parallelize);
parallelize->replaceAllUsesWith(cloned);
parallelize->erase();
- rewriter.create<omp::TerminatorOp>(loc);
+ omp::TerminatorOp::create(rewriter, loc);
changed = true;
}
}
@@ -298,10 +298,10 @@ fissionWorkdistribute(omp::WorkdistributeOp workdistribute) {
/// Generate omp.parallel operation with an empty region.
static void genParallelOp(Location loc, OpBuilder &rewriter, bool composite) {
- auto parallelOp = rewriter.create<mlir::omp::ParallelOp>(loc);
+ auto parallelOp = mlir::omp::ParallelOp::create(rewriter, loc);
parallelOp.setComposite(composite);
rewriter.createBlock(&parallelOp.getRegion());
- rewriter.setInsertionPoint(rewriter.create<mlir::omp::TerminatorOp>(loc));
+ rewriter.setInsertionPoint(mlir::omp::TerminatorOp::create(rewriter, loc));
return;
}
@@ -309,7 +309,7 @@ static void genParallelOp(Location loc, OpBuilder &rewriter, bool composite) {
static void genDistributeOp(Location loc, OpBuilder &rewriter, bool composite) {
mlir::omp::DistributeOperands distributeClauseOps;
auto distributeOp =
- rewriter.create<mlir::omp::DistributeOp>(loc, distributeClauseOps);
+ mlir::omp::DistributeOp::create(rewriter, loc, distributeClauseOps);
distributeOp.setComposite(composite);
auto distributeBlock = rewriter.createBlock(&distributeOp.getRegion());
rewriter.setInsertionPointToStart(distributeBlock);
@@ -334,12 +334,12 @@ static void genWsLoopOp(mlir::OpBuilder &rewriter, fir::DoLoopOp doLoop,
const mlir::omp::LoopNestOperands &clauseOps,
bool composite) {
- auto wsloopOp = rewriter.create<mlir::omp::WsloopOp>(doLoop.getLoc());
+ auto wsloopOp = mlir::omp::WsloopOp::create(rewriter, doLoop.getLoc());
wsloopOp.setComposite(composite);
rewriter.createBlock(&wsloopOp.getRegion());
auto loopNestOp =
- rewriter.create<mlir::omp::LoopNestOp>(doLoop.getLoc(), clauseOps);
+ mlir::omp::LoopNestOp::create(rewriter, doLoop.getLoc(), clauseOps);
// Clone the loop's body inside the loop nest construct using the
// mapped values.
@@ -351,7 +351,7 @@ static void genWsLoopOp(mlir::OpBuilder &rewriter, fir::DoLoopOp doLoop,
// Erase fir.result op of do loop and create yield op.
if (auto resultOp = dyn_cast<fir::ResultOp>(terminatorOp)) {
rewriter.setInsertionPoint(terminatorOp);
- rewriter.create<mlir::omp::YieldOp>(doLoop->getLoc());
+ mlir::omp::YieldOp::create(rewriter, doLoop->getLoc());
terminatorOp->erase();
}
}
@@ -494,15 +494,15 @@ static SmallVector<Value> convertFlatToMultiDim(OpBuilder &builder,
// Convert flat index to multi-dimensional indices
SmallVector<Value> indices(rank);
Value temp = flatIdx;
- auto c1 = builder.create<arith::ConstantIndexOp>(loc, 1);
+ auto c1 = arith::ConstantIndexOp::create(builder, loc, 1);
// Work backwards through dimensions (row-major order)
for (int i = rank - 1; i >= 0; --i) {
- Value zeroBasedIdx = builder.create<arith::RemSIOp>(loc, temp, extents[i]);
+ Value zeroBasedIdx = arith::RemSIOp::create(builder, loc, temp, extents[i]);
// Convert to one-based index
- indices[i] = builder.create<arith::AddIOp>(loc, zeroBasedIdx, c1);
+ indices[i] = arith::AddIOp::create(builder, loc, zeroBasedIdx, c1);
if (i > 0) {
- temp = builder.create<arith::DivSIOp>(loc, temp, extents[i]);
+ temp = arith::DivSIOp::create(builder, loc, temp, extents[i]);
}
}
@@ -525,7 +525,7 @@ static Value CalculateTotalElements(OpBuilder &builder, Location loc,
if (i == 0) {
totalElems = extent;
} else {
- totalElems = builder.create<arith::MulIOp>(loc, totalElems, extent);
+ totalElems = arith::MulIOp::create(builder, loc, totalElems, extent);
}
}
return totalElems;
@@ -562,14 +562,14 @@ static void replaceWithUnorderedDoLoop(OpBuilder &builder, Location loc,
// Load destination array box (if it's a reference)
Value arrayBox = destBox;
if (isa<fir::ReferenceType>(destBox.getType()))
- arrayBox = builder.create<fir::LoadOp>(loc, destBox);
+ arrayBox = fir::LoadOp::create(builder, loc, destBox);
- auto scalarValue = builder.create<fir::BoxAddrOp>(loc, srcBox);
- Value scalar = builder.create<fir::LoadOp>(loc, scalarValue);
+ auto scalarValue = fir::BoxAddrOp::create(builder, loc, srcBox);
+ Value scalar = fir::LoadOp::create(builder, loc, scalarValue);
// Calculate total number of elements (flattened)
- auto c0 = builder.create<arith::ConstantIndexOp>(loc, 0);
- auto c1 = builder.create<arith::ConstantIndexOp>(loc, 1);
+ auto c0 = arith::ConstantIndexOp::create(builder, loc, 0);
+ auto c1 = arith::ConstantIndexOp::create(builder, loc, 1);
Value totalElems = CalculateTotalElements(builder, loc, arrayBox);
auto *workdistributeBlock = &workdistribute.getRegion().front();
@@ -587,7 +587,7 @@ static void replaceWithUnorderedDoLoop(OpBuilder &builder, Location loc,
builder, loc, fir::ReferenceType::get(scalar.getType()), arrayBox,
nullptr, nullptr, ValueRange{indices}, ValueRange{});
- builder.create<fir::StoreOp>(loc, scalar, elemPtr);
+ fir::StoreOp::create(builder, loc, scalar, elemPtr);
}
/// workdistributeRuntimeCallLower method finds the runtime calls
@@ -749,14 +749,15 @@ FailureOr<omp::TargetOp> splitTargetData(omp::TargetOp targetOp,
auto deviceAddrVars = targetOp.getHasDeviceAddrVars();
auto devicePtrVars = targetOp.getIsDevicePtrVars();
// Create the target data op
- auto targetDataOp = rewriter.create<omp::TargetDataOp>(
- loc, device, ifExpr, outerMapInfos, deviceAddrVars, devicePtrVars);
+ auto targetDataOp =
+ omp::TargetDataOp::create(rewriter, loc, device, ifExpr, outerMapInfos,
+ deviceAddrVars, devicePtrVars);
auto taregtDataBlock = rewriter.createBlock(&targetDataOp.getRegion());
- rewriter.create<mlir::omp::TerminatorOp>(lo...
[truncated]
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Thanks
See https://discourse.llvm.org/t/psa-opty-create-now-with-100-more-tab-complete/87339.
I plan to mark these as deprecated in #164649.