diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPClauseOperands.h b/mlir/include/mlir/Dialect/OpenMP/OpenMPClauseOperands.h
index 3c5fa23bd4a7f..244cee1dd635b 100644
--- a/mlir/include/mlir/Dialect/OpenMP/OpenMPClauseOperands.h
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPClauseOperands.h
@@ -295,10 +295,9 @@ using TeamsClauseOps =
                     PrivateClauseOps, ReductionClauseOps, ThreadLimitClauseOps>;
 
 using WsloopClauseOps =
-    detail::Clauses<AllocateClauseOps, CollapseClauseOps, LinearClauseOps,
-                    NowaitClauseOps, OrderClauseOps, OrderedClauseOps,
-                    PrivateClauseOps, ReductionClauseOps, ScheduleClauseOps>;
+    detail::Clauses<AllocateClauseOps, LinearClauseOps, NowaitClauseOps,
+                    OrderClauseOps, OrderedClauseOps, PrivateClauseOps,
+                    ReductionClauseOps, ScheduleClauseOps>;
 
 } // namespace omp
 } // namespace mlir
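[Review note] The user-visible effect of this patch, as a minimal before/after sketch (bounds and names are placeholders, mirroring the tests updated below):

```mlir
// Before: omp.wsloop carried bounds, steps and induction variables itself.
omp.wsloop for (%iv) : index = (%lb) to (%ub) step (%step) {
  omp.yield
}

// After: omp.wsloop is a wrapper; the loop lives in a nested omp.loop_nest.
omp.wsloop {
  omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
    omp.yield
  }
  omp.terminator
}
```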
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
index 10771f6e854dd..8ab116ce391e2 100644
--- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
@@ -600,29 +600,30 @@ def LoopNestOp : OpenMP_Op<"loop_nest", [SameVariadicOperandSize,
 //===----------------------------------------------------------------------===//
 
 def WsloopOp : OpenMP_Op<"wsloop", [AttrSizedOperandSegments,
-                         AllTypesMatch<["lowerBound", "upperBound", "step"]>,
                          DeclareOpInterfaceMethods<LoopWrapperInterface>,
-                         RecursiveMemoryEffects, ReductionClauseInterface]> {
+                         RecursiveMemoryEffects, ReductionClauseInterface,
+                         SingleBlockImplicitTerminator<"TerminatorOp">]> {
   let summary = "worksharing-loop construct";
   let description = [{
     The worksharing-loop construct specifies that the iterations of the loop(s)
     will be executed in parallel by threads in the current context. These
     iterations are spread across threads that already exist in the enclosing
-    parallel region. The lower and upper bounds specify a half-open range: the
-    range includes the lower bound but does not include the upper bound. If the
-    `inclusive` attribute is specified then the upper bound is also included.
+    parallel region.
 
-    The body region can contain any number of blocks. The region is terminated
-    by "omp.yield" instruction without operands.
+    The body region can only contain a single block which must contain a single
+    operation and a terminator. The operation must be another compatible loop
+    wrapper or an `omp.loop_nest`.
 
     ```
-    omp.wsloop
-    for (%i1, %i2) : index = (%c0, %c0) to (%c10, %c10) step (%c1, %c1) {
-      %a = load %arrA[%i1, %i2] : memref<?x?xf32>
-      %b = load %arrB[%i1, %i2] : memref<?x?xf32>
-      %sum = arith.addf %a, %b : f32
-      store %sum, %arrC[%i1, %i2] : memref<?x?xf32>
-      omp.yield
+    omp.wsloop {
+      omp.loop_nest (%i1, %i2) : index = (%c0, %c0) to (%c10, %c10) step (%c1, %c1) {
+        %a = load %arrA[%i1, %i2] : memref<?x?xf32>
+        %b = load %arrB[%i1, %i2] : memref<?x?xf32>
+        %sum = arith.addf %a, %b : f32
+        store %sum, %arrC[%i1, %i2] : memref<?x?xf32>
+        omp.yield
+      }
+      omp.terminator
     }
     ```
 
@@ -665,10 +666,7 @@ def WsloopOp : OpenMP_Op<"wsloop", [AttrSizedOperandSegments,
     passed by reference.
   }];
 
-  let arguments = (ins Variadic<IntLikeType>:$lowerBound,
-                       Variadic<IntLikeType>:$upperBound,
-                       Variadic<IntLikeType>:$step,
-                       Variadic<AnyType>:$linear_vars,
+  let arguments = (ins Variadic<AnyType>:$linear_vars,
                        Variadic<I32>:$linear_step_vars,
                        Variadic<OpenMP_PointerLikeType>:$reduction_vars,
                        OptionalAttr<SymbolRefArrayAttr>:$reductions,
@@ -679,22 +677,16 @@ def WsloopOp : OpenMP_Op<"wsloop", [AttrSizedOperandSegments,
                        UnitAttr:$nowait,
                        UnitAttr:$byref,
                        ConfinedAttr<OptionalAttr<I64Attr>, [IntMinValue<0>]>:$ordered_val,
-                       OptionalAttr<OrderKindAttr>:$order_val,
-                       UnitAttr:$inclusive);
+                       OptionalAttr<OrderKindAttr>:$order_val);
 
   let builders = [
-    OpBuilder<(ins "ValueRange":$lowerBound, "ValueRange":$upperBound,
-               "ValueRange":$step,
-               CArg<"ArrayRef<NamedAttribute>", "{}">:$attributes)>,
+    OpBuilder<(ins CArg<"ArrayRef<NamedAttribute>", "{}">:$attributes)>,
     OpBuilder<(ins CArg<"const WsloopClauseOps &">:$clauses)>
   ];
 
   let regions = (region AnyRegion:$region);
 
   let extraClassDeclaration = [{
-    /// Returns the number of loops in the worksharing-loop nest.
-    unsigned getNumLoops() { return getLowerBound().size(); }
-
     /// Returns the number of reduction variables.
     unsigned getNumReductionVars() { return getReductionVars().size(); }
   }];
@@ -711,9 +703,8 @@ def WsloopOp : OpenMP_Op<"wsloop", [AttrSizedOperandSegments,
           |`byref` $byref
           |`ordered` `(` $ordered_val `)`
           |`order` `(` custom<OrderClause>($order_val) `)`
-    ) custom<Wsloop>($region, $lowerBound, $upperBound, $step, type($step),
-          $reduction_vars, type($reduction_vars), $reductions,
-          $inclusive) attr-dict
+    ) custom<Wsloop>($region, $reduction_vars, type($reduction_vars),
+          $reductions) attr-dict
   }];
   let hasVerifier = 1;
 }
@@ -732,7 +723,7 @@ def SimdOp : OpenMP_Op<"simd", [AttrSizedOperandSegments,
     transformed into a SIMD loop (that is, multiple iterations of the loop can
     be executed concurrently using SIMD instructions).
 
-    The body region can contain a single block which must contain a single
+    The body region can only contain a single block which must contain a single
     operation and a terminator. The operation must be another compatible loop
     wrapper or an `omp.loop_nest`.
 
@@ -766,6 +757,7 @@ def SimdOp : OpenMP_Op<"simd", [AttrSizedOperandSegments,
       store %sum, %arrC[%i1, %i2] : memref<?x?xf32>
       omp.yield
     }
+    omp.terminator
   }
   ```
 }];
@@ -805,8 +797,8 @@ def SimdOp : OpenMP_Op<"simd", [AttrSizedOperandSegments,
 
 def YieldOp : OpenMP_Op<"yield",
     [Pure, ReturnLike, Terminator,
-     ParentOneOf<["LoopNestOp", "WsloopOp", "DeclareReductionOp",
-                  "AtomicUpdateOp", "PrivateClauseOp"]>]> {
+     ParentOneOf<["AtomicUpdateOp", "DeclareReductionOp", "LoopNestOp",
+                  "PrivateClauseOp"]>]> {
   let summary = "loop yield and termination operation";
   let description = [{
     "omp.yield" yields SSA values from the OpenMP dialect op region and
@@ -846,7 +838,7 @@ def DistributeOp : OpenMP_Op<"distribute", [AttrSizedOperandSegments,
     iterations are spread across threads that already exist in the enclosing
     region.
 
-    The body region can contain a single block which must contain a single
+    The body region can only contain a single block which must contain a single
     operation and a terminator. The operation must be another compatible loop
     wrapper or an `omp.loop_nest`.
 
@@ -864,6 +856,7 @@ def DistributeOp : OpenMP_Op<"distribute", [AttrSizedOperandSegments,
       store %sum, %arrC[%i1, %i2] : memref<?x?xf32>
      omp.yield
     }
+    omp.terminator
   }
   ```
   // TODO: private_var, firstprivate_var, lastprivate_var, collapse
@@ -1029,7 +1022,7 @@ def TaskloopOp : OpenMP_Op<"taskloop", [AttrSizedOperandSegments,
     iterations are distributed across tasks generated by the construct and
     scheduled to be executed.
 
-    The body region can contain a single block which must contain a single
+    The body region can only contain a single block which must contain a single
     operation and a terminator. The operation must be another compatible loop
     wrapper or an `omp.loop_nest`.
 
@@ -1042,6 +1035,7 @@ def TaskloopOp : OpenMP_Op<"taskloop", [AttrSizedOperandSegments,
      store %sum, %arrC[%i1, %i2] : memref<?x?xf32>
       omp.yield
     }
+    omp.terminator
   }
   ```
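[Review note] With the `custom<Wsloop>` directive reduced to the optional reduction clause plus the region, the private reduction value becomes a block argument of the wrapper region rather than trailing the induction variables. A sketch of IR that should round-trip under the new format (the `@add_f32` declaration and bounds are assumed to exist):

```mlir
omp.wsloop reduction(@add_f32 %var -> %prv : !llvm.ptr) {
  omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
    // %prv is an entry block argument of the omp.wsloop region,
    // visible inside the nested omp.loop_nest.
    omp.yield
  }
  omp.terminator
}
```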
diff --git a/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp b/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp
index 7f91367ad427a..d6f85451ee5d3 100644
--- a/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp
+++ b/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp
@@ -461,18 +461,50 @@ struct ParallelOpLowering : public OpRewritePattern<scf::ParallelOp> {
     // Replace the loop.
     {
       OpBuilder::InsertionGuard allocaGuard(rewriter);
-      auto loop = rewriter.create<omp::WsloopOp>(
+      // Create worksharing loop wrapper.
+      auto wsloopOp = rewriter.create<omp::WsloopOp>(parallelOp.getLoc());
+      if (!reductionVariables.empty()) {
+        wsloopOp.setReductionsAttr(
+            ArrayAttr::get(rewriter.getContext(), reductionDeclSymbols));
+        wsloopOp.getReductionVarsMutable().append(reductionVariables);
+      }
+      rewriter.create<omp::TerminatorOp>(loc); // omp.parallel terminator.
+
+      // The wrapper's entry block arguments will define the reduction
+      // variables.
+      llvm::SmallVector<mlir::Type> reductionTypes;
+      reductionTypes.reserve(reductionVariables.size());
+      llvm::transform(reductionVariables, std::back_inserter(reductionTypes),
+                      [](mlir::Value v) { return v.getType(); });
+      rewriter.createBlock(
+          &wsloopOp.getRegion(), {}, reductionTypes,
+          llvm::SmallVector<mlir::Location>(reductionVariables.size(),
+                                            parallelOp.getLoc()));
+
+      rewriter.setInsertionPoint(
+          rewriter.create<omp::TerminatorOp>(parallelOp.getLoc()));
+
+      // Create loop nest and populate region with contents of scf.parallel.
+      auto loopOp = rewriter.create<omp::LoopNestOp>(
           parallelOp.getLoc(), parallelOp.getLowerBound(),
           parallelOp.getUpperBound(), parallelOp.getStep());
-      rewriter.create<omp::TerminatorOp>(loc);
 
-      rewriter.inlineRegionBefore(parallelOp.getRegion(), loop.getRegion(),
-                                  loop.getRegion().begin());
+      rewriter.inlineRegionBefore(parallelOp.getRegion(), loopOp.getRegion(),
+                                  loopOp.getRegion().begin());
 
-      Block *ops = rewriter.splitBlock(&*loop.getRegion().begin(),
-                                       loop.getRegion().begin()->begin());
+      // Remove reduction-related block arguments from omp.loop_nest and
+      // redirect uses to the corresponding omp.wsloop block argument.
+      mlir::Block &loopOpEntryBlock = loopOp.getRegion().front();
+      unsigned numLoops = parallelOp.getNumLoops();
+      rewriter.replaceAllUsesWith(
+          loopOpEntryBlock.getArguments().drop_front(numLoops),
+          wsloopOp.getRegion().getArguments());
+      loopOpEntryBlock.eraseArguments(
+          numLoops, loopOpEntryBlock.getNumArguments() - numLoops);
 
-      rewriter.setInsertionPointToStart(&*loop.getRegion().begin());
+      Block *ops =
+          rewriter.splitBlock(&loopOpEntryBlock, loopOpEntryBlock.begin());
+      rewriter.setInsertionPointToStart(&loopOpEntryBlock);
 
       auto scope = rewriter.create<memref::AllocaScopeOp>(parallelOp.getLoc(),
                                                           TypeRange());
@@ -481,11 +513,6 @@ struct ParallelOpLowering : public OpRewritePattern<scf::ParallelOp> {
       rewriter.mergeBlocks(ops, scopeBlock);
       rewriter.setInsertionPointToEnd(&*scope.getBodyRegion().begin());
       rewriter.create<memref::AllocaScopeReturnOp>(loc, ValueRange());
-      if (!reductionVariables.empty()) {
-        loop.setReductionsAttr(
-            ArrayAttr::get(rewriter.getContext(), reductionDeclSymbols));
-        loop.getReductionVarsMutable().append(reductionVariables);
-      }
     }
   }
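[Review note] The rewritten `ParallelOpLowering` now emits the nesting below (a sketch assembled from the scf-to-openmp.mlir expectations further down; reduction-free case, placeholder bounds):

```mlir
omp.parallel {
  omp.wsloop {
    omp.loop_nest (%i, %j) : index = (%lb0, %lb1) to (%ub0, %ub1) step (%s0, %s1) {
      memref.alloca_scope {
        // ... inlined scf.parallel body ...
      }
      omp.yield
    }
    omp.terminator
  }
  omp.terminator
}
```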
diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
index 528a0d05b1011..f60668dd0cf99 100644
--- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
+++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
@@ -1484,86 +1484,72 @@ LogicalResult SingleOp::verify() {
 // WsloopOp
 //===----------------------------------------------------------------------===//
 
-/// loop-control ::= `(` ssa-id-list `)` `:` type `=` loop-bounds
-/// loop-bounds := `(` ssa-id-list `)` to `(` ssa-id-list `)` inclusive? steps
-/// steps := `step` `(`ssa-id-list`)`
 ParseResult parseWsloop(OpAsmParser &parser, Region &region,
-                        SmallVectorImpl<OpAsmParser::UnresolvedOperand> &lowerBound,
-                        SmallVectorImpl<OpAsmParser::UnresolvedOperand> &upperBound,
-                        SmallVectorImpl<OpAsmParser::UnresolvedOperand> &steps,
-                        SmallVectorImpl<Type> &loopVarTypes,
                         SmallVectorImpl<OpAsmParser::UnresolvedOperand> &reductionOperands,
-                        SmallVectorImpl<Type> &reductionTypes, ArrayAttr &reductionSymbols,
-                        UnitAttr &inclusive) {
-
+                        SmallVectorImpl<Type> &reductionTypes,
+                        ArrayAttr &reductionSymbols) {
   // Parse an optional reduction clause
   llvm::SmallVector<OpAsmParser::Argument> privates;
-  bool hasReduction = succeeded(parser.parseOptionalKeyword("reduction")) &&
-                      succeeded(parseClauseWithRegionArgs(
-                          parser, region, reductionOperands, reductionTypes,
-                          reductionSymbols, privates));
-
-  if (parser.parseKeyword("for"))
-    return failure();
-
-  // Parse an opening `(` followed by induction variables followed by `)`
-  SmallVector<OpAsmParser::Argument> ivs;
-  Type loopVarType;
-  if (parser.parseArgumentList(ivs, OpAsmParser::Delimiter::Paren) ||
-      parser.parseColonType(loopVarType) ||
-      // Parse loop bounds.
-      parser.parseEqual() ||
-      parser.parseOperandList(lowerBound, ivs.size(),
-                              OpAsmParser::Delimiter::Paren) ||
-      parser.parseKeyword("to") ||
-      parser.parseOperandList(upperBound, ivs.size(),
-                              OpAsmParser::Delimiter::Paren))
-    return failure();
-
-  if (succeeded(parser.parseOptionalKeyword("inclusive")))
-    inclusive = UnitAttr::get(parser.getBuilder().getContext());
-
-  // Parse step values.
-  if (parser.parseKeyword("step") ||
-      parser.parseOperandList(steps, ivs.size(), OpAsmParser::Delimiter::Paren))
-    return failure();
-
-  // Now parse the body.
-  loopVarTypes = SmallVector<Type>(ivs.size(), loopVarType);
-  for (auto &iv : ivs)
-    iv.type = loopVarType;
-
-  SmallVector<OpAsmParser::Argument> regionArgs{ivs};
-  if (hasReduction)
-    llvm::copy(privates, std::back_inserter(regionArgs));
-
-  return parser.parseRegion(region, regionArgs);
+  if (succeeded(parser.parseOptionalKeyword("reduction"))) {
+    if (failed(parseClauseWithRegionArgs(parser, region, reductionOperands,
+                                         reductionTypes, reductionSymbols,
+                                         privates)))
+      return failure();
+  }
+  return parser.parseRegion(region, privates);
 }
 
 void printWsloop(OpAsmPrinter &p, Operation *op, Region &region,
-                 ValueRange lowerBound, ValueRange upperBound, ValueRange steps,
-                 TypeRange loopVarTypes, ValueRange reductionOperands,
-                 TypeRange reductionTypes, ArrayAttr reductionSymbols,
-                 UnitAttr inclusive) {
+                 ValueRange reductionOperands, TypeRange reductionTypes,
+                 ArrayAttr reductionSymbols) {
   if (reductionSymbols) {
-    auto reductionArgs =
-        region.front().getArguments().drop_front(loopVarTypes.size());
+    auto reductionArgs = region.front().getArguments();
     printClauseWithRegionArgs(p, op, reductionArgs, "reduction",
                               reductionOperands, reductionTypes,
                               reductionSymbols);
   }
-
-  p << " for ";
-  auto args = region.front().getArguments().drop_back(reductionOperands.size());
-  p << " (" << args << ") : " << args[0].getType() << " = (" << lowerBound
-    << ") to (" << upperBound << ") ";
-  if (inclusive)
-    p << "inclusive ";
-  p << "step (" << steps << ") ";
   p.printRegion(region, /*printEntryBlockArgs=*/false);
 }
 
+void WsloopOp::build(OpBuilder &builder, OperationState &state,
+                     ArrayRef<NamedAttribute> attributes) {
+  build(builder, state, /*linear_vars=*/ValueRange(),
+        /*linear_step_vars=*/ValueRange(), /*reduction_vars=*/ValueRange(),
+        /*reductions=*/nullptr, /*schedule_val=*/nullptr,
+        /*schedule_chunk_var=*/nullptr, /*schedule_modifier=*/nullptr,
+        /*simd_modifier=*/false, /*nowait=*/false, /*byref=*/false,
+        /*ordered_val=*/nullptr, /*order_val=*/nullptr);
+  state.addAttributes(attributes);
+}
+
+void WsloopOp::build(OpBuilder &builder, OperationState &state,
+                     const WsloopClauseOps &clauses) {
+  MLIRContext *ctx = builder.getContext();
+  // TODO: Store clauses in op: allocateVars, allocatorVars, privateVars,
+  // privatizers.
+  WsloopOp::build(
+      builder, state, clauses.linearVars, clauses.linearStepVars,
+      clauses.reductionVars, makeArrayAttr(ctx, clauses.reductionDeclSymbols),
+      clauses.scheduleValAttr, clauses.scheduleChunkVar,
+      clauses.scheduleModAttr, clauses.scheduleSimdAttr, clauses.nowaitAttr,
+      clauses.reductionByRefAttr, clauses.orderedAttr, clauses.orderAttr);
+}
+
+LogicalResult WsloopOp::verify() {
+  if (!isWrapper())
+    return emitOpError() << "must be a loop wrapper";
+
+  if (LoopWrapperInterface nested = getNestedWrapper()) {
+    // Check for the allowed leaf constructs that may appear in a composite
+    // construct directly after DO/FOR.
+    if (!isa<SimdOp>(nested))
+      return emitError() << "only supported nested wrapper is 'omp.simd'";
+  }
+
+  return verifyReductionVarList(*this, getReductions(), getReductionVars());
+}
+
 //===----------------------------------------------------------------------===//
 // Simd construct [2.9.3.1]
 //===----------------------------------------------------------------------===//
@@ -1947,42 +1933,6 @@ void LoopNestOp::gatherWrappers(
   }
 }
 
-//===----------------------------------------------------------------------===//
-// WsloopOp
-//===----------------------------------------------------------------------===//
-
-void WsloopOp::build(OpBuilder &builder, OperationState &state,
-                     ValueRange lowerBound, ValueRange upperBound,
-                     ValueRange step, ArrayRef<NamedAttribute> attributes) {
-  build(builder, state, lowerBound, upperBound, step,
-        /*linear_vars=*/ValueRange(),
-        /*linear_step_vars=*/ValueRange(), /*reduction_vars=*/ValueRange(),
-        /*reductions=*/nullptr, /*schedule_val=*/nullptr,
-        /*schedule_chunk_var=*/nullptr, /*schedule_modifier=*/nullptr,
-        /*simd_modifier=*/false, /*nowait=*/false, /*byref=*/false,
-        /*ordered_val=*/nullptr,
-        /*order_val=*/nullptr, /*inclusive=*/false);
-  state.addAttributes(attributes);
-}
-
-void WsloopOp::build(OpBuilder &builder, OperationState &state,
-                     const WsloopClauseOps &clauses) {
-  MLIRContext *ctx = builder.getContext();
-  // TODO Store clauses in op: allocateVars, allocatorVars, privateVars,
-  // privatizers.
-  WsloopOp::build(
-      builder, state, clauses.loopLBVar, clauses.loopUBVar, clauses.loopStepVar,
-      clauses.linearVars, clauses.linearStepVars, clauses.reductionVars,
-      makeArrayAttr(ctx, clauses.reductionDeclSymbols), clauses.scheduleValAttr,
-      clauses.scheduleChunkVar, clauses.scheduleModAttr,
-      clauses.scheduleSimdAttr, clauses.nowaitAttr, clauses.reductionByRefAttr,
-      clauses.orderedAttr, clauses.orderAttr, clauses.loopInclusiveAttr);
-}
-
-LogicalResult WsloopOp::verify() {
-  return verifyReductionVarList(*this, getReductions(), getReductionVars());
-}
-
 //===----------------------------------------------------------------------===//
 // Critical construct (2.17.1)
 //===----------------------------------------------------------------------===//
@@ -2014,6 +1964,39 @@ LogicalResult CriticalOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
 // Ordered construct
 //===----------------------------------------------------------------------===//
 
+static LogicalResult verifyOrderedParent(Operation &op) {
+  bool hasRegion = op.getNumRegions() > 0;
+  auto loopOp = op.getParentOfType<LoopNestOp>();
+  if (!loopOp) {
+    if (hasRegion)
+      return success();
+
+    // TODO: Consider if this needs to be the case only for the standalone
+    // variant of the ordered construct.
+    return op.emitOpError() << "must be nested inside of a loop";
+  }
+
+  Operation *wrapper = loopOp->getParentOp();
+  if (auto wsloopOp = dyn_cast<WsloopOp>(wrapper)) {
+    IntegerAttr orderedAttr = wsloopOp.getOrderedValAttr();
+    if (!orderedAttr)
+      return op.emitOpError() << "the enclosing worksharing-loop region must "
+                                 "have an ordered clause";
+
+    if (hasRegion && orderedAttr.getInt() != 0)
+      return op.emitOpError() << "the enclosing loop's ordered clause must not "
+                                 "have a parameter present";
+
+    if (!hasRegion && orderedAttr.getInt() == 0)
+      return op.emitOpError() << "the enclosing loop's ordered clause must "
+                                 "have a parameter present";
+  } else if (!isa<SimdOp>(wrapper)) {
+    return op.emitOpError() << "must be nested inside of a worksharing, simd "
+                               "or worksharing simd loop";
+  }
+  return success();
+}
+
 void OrderedOp::build(OpBuilder &builder, OperationState &state,
                       const OrderedOpClauseOps &clauses) {
   OrderedOp::build(builder, state, clauses.doacrossDependTypeAttr,
@@ -2021,14 +2004,11 @@ void OrderedOp::build(OpBuilder &builder, OperationState &state,
 }
 
 LogicalResult OrderedOp::verify() {
-  auto container = (*this)->getParentOfType<WsloopOp>();
-  if (!container || !container.getOrderedValAttr() ||
-      container.getOrderedValAttr().getInt() == 0)
-    return emitOpError() << "ordered depend directive must be closely "
-                         << "nested inside a worksharing-loop with ordered "
-                         << "clause with parameter present";
-
-  if (container.getOrderedValAttr().getInt() != (int64_t)*getNumLoopsVal())
+  if (failed(verifyOrderedParent(**this)))
+    return failure();
+
+  auto wrapper = (*this)->getParentOfType<WsloopOp>();
+  if (!wrapper || *wrapper.getOrderedVal() != *getNumLoopsVal())
     return emitOpError() << "number of variables in depend clause does not "
                          << "match number of iteration variables in the "
                         << "doacross loop";
@@ -2046,15 +2026,7 @@ LogicalResult OrderedRegionOp::verify() {
   if (getSimd())
     return failure();
 
-  if (auto container = (*this)->getParentOfType<WsloopOp>()) {
-    if (!container.getOrderedValAttr() ||
-        container.getOrderedValAttr().getInt() != 0)
-      return emitOpError() << "ordered region must be closely nested inside "
-                           << "a worksharing-loop region with an ordered "
-                           << "clause without parameter present";
-  }
-
-  return success();
+  return verifyOrderedParent(**this);
 }
 
 //===----------------------------------------------------------------------===//
@@ -2199,15 +2171,19 @@ LogicalResult CancelOp::verify() {
                          << "inside a parallel region";
   }
   if (cct == ClauseCancellationConstructType::Loop) {
-    if (!isa<WsloopOp>(parentOp)) {
-      return emitOpError() << "cancel loop must appear "
-                           << "inside a worksharing-loop region";
+    auto loopOp = dyn_cast<LoopNestOp>(parentOp);
+    auto wsloopOp = llvm::dyn_cast_if_present<WsloopOp>(
+        loopOp ? loopOp->getParentOp() : nullptr);
+
+    if (!wsloopOp) {
+      return emitOpError()
+             << "cancel loop must appear inside a worksharing-loop region";
     }
-    if (cast<WsloopOp>(parentOp).getNowaitAttr()) {
+    if (wsloopOp.getNowaitAttr()) {
       return emitError() << "A worksharing construct that is canceled "
                          << "must not have a nowait clause";
     }
-    if (cast<WsloopOp>(parentOp).getOrderedValAttr()) {
+    if (wsloopOp.getOrderedValAttr()) {
      return emitError() << "A worksharing construct that is canceled "
                          << "must not have an ordered clause";
     }
@@ -2245,7 +2221,7 @@ LogicalResult CancellationPointOp::verify() {
                          << "inside a parallel region";
   }
   if ((cct == ClauseCancellationConstructType::Loop) &&
-      !isa<WsloopOp>(parentOp)) {
+      (!isa<LoopNestOp>(parentOp) || !isa<WsloopOp>(parentOp->getParentOp()))) {
     return emitOpError() << "cancellation point loop must appear "
                          << "inside a worksharing-loop region";
   }
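[Review note] Summarizing what `verifyOrderedParent` accepts after this change: `omp.ordered.region` needs an enclosing wsloop whose `ordered` clause has no parameter (printed as `ordered(0)`), while the standalone `omp.ordered` needs one with a parameter; compare the invalid.mlir cases below. A sketch of the accepted region form, with placeholder bounds:

```mlir
omp.wsloop ordered(0) {
  omp.loop_nest (%i) : i32 = (%lb) to (%ub) step (%step) {
    omp.ordered.region {
      omp.terminator
    }
    omp.yield
  }
  omp.terminator
}
```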
diff --git a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
index ebcdbc02aadd0..9f87f89d8c636 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.cpp
@@ -916,35 +916,37 @@ static LogicalResult inlineReductionCleanup(
 static LogicalResult
 convertOmpWsloop(Operation &opInst, llvm::IRBuilderBase &builder,
                  LLVM::ModuleTranslation &moduleTranslation) {
-  auto loop = cast<omp::WsloopOp>(opInst);
-  const bool isByRef = loop.getByref();
+  auto wsloopOp = cast<omp::WsloopOp>(opInst);
+  auto loopOp = cast<omp::LoopNestOp>(wsloopOp.getWrappedLoop());
+  const bool isByRef = wsloopOp.getByref();
+
   // TODO: this should be in the op verifier instead.
-  if (loop.getLowerBound().empty())
+  if (loopOp.getLowerBound().empty())
     return failure();
 
   // Static is the default.
   auto schedule =
-      loop.getScheduleVal().value_or(omp::ClauseScheduleKind::Static);
+      wsloopOp.getScheduleVal().value_or(omp::ClauseScheduleKind::Static);
 
   // Find the loop configuration.
-  llvm::Value *step = moduleTranslation.lookupValue(loop.getStep()[0]);
+  llvm::Value *step = moduleTranslation.lookupValue(loopOp.getStep()[0]);
   llvm::Type *ivType = step->getType();
   llvm::Value *chunk = nullptr;
-  if (loop.getScheduleChunkVar()) {
+  if (wsloopOp.getScheduleChunkVar()) {
     llvm::Value *chunkVar =
-        moduleTranslation.lookupValue(loop.getScheduleChunkVar());
+        moduleTranslation.lookupValue(wsloopOp.getScheduleChunkVar());
     chunk = builder.CreateSExtOrTrunc(chunkVar, ivType);
   }
 
   SmallVector<omp::DeclareReductionOp> reductionDecls;
-  collectReductionDecls(loop, reductionDecls);
+  collectReductionDecls(wsloopOp, reductionDecls);
   llvm::OpenMPIRBuilder::InsertPointTy allocaIP =
       findAllocaInsertPoint(builder, moduleTranslation);
 
   SmallVector<llvm::Value *> privateReductionVariables;
   DenseMap<Value, llvm::Value *> reductionVariableMap;
   if (!isByRef) {
-    allocByValReductionVars(loop, builder, moduleTranslation, allocaIP,
+    allocByValReductionVars(wsloopOp, builder, moduleTranslation, allocaIP,
                             reductionDecls, privateReductionVariables,
                             reductionVariableMap);
   }
@@ -952,13 +954,12 @@ convertOmpWsloop(Operation &opInst, llvm::IRBuilderBase &builder,
   // Before the loop, store the initial values of reductions into reduction
   // variables. Although this could be done after allocas, we don't want to mess
   // up with the alloca insertion point.
-  MutableArrayRef<BlockArgument> reductionArgs =
-      loop.getRegion().getArguments().take_back(loop.getNumReductionVars());
-  for (unsigned i = 0; i < loop.getNumReductionVars(); ++i) {
+  ArrayRef<BlockArgument> reductionArgs = wsloopOp.getRegion().getArguments();
+  for (unsigned i = 0; i < wsloopOp.getNumReductionVars(); ++i) {
     SmallVector<llvm::Value *> phis;
 
     // map block argument to initializer region
-    mapInitializationArg(loop, moduleTranslation, reductionDecls, i);
+    mapInitializationArg(wsloopOp, moduleTranslation, reductionDecls, i);
 
     if (failed(inlineConvertOmpRegions(reductionDecls[i].getInitializerRegion(),
                                        "omp.reduction.neutral", builder,
@@ -977,7 +978,7 @@ convertOmpWsloop(Operation &opInst, llvm::IRBuilderBase &builder,
 
       privateReductionVariables.push_back(var);
       moduleTranslation.mapValue(reductionArgs[i], phis[0]);
-      reductionVariableMap.try_emplace(loop.getReductionVars()[i], phis[0]);
+      reductionVariableMap.try_emplace(wsloopOp.getReductionVars()[i], phis[0]);
     } else {
       // for by-ref case the store is inside of the reduction region
       builder.CreateStore(phis[0], privateReductionVariables[i]);
@@ -1008,33 +1009,34 @@ convertOmpWsloop(Operation &opInst, llvm::IRBuilderBase &builder,
   auto bodyGen = [&](llvm::OpenMPIRBuilder::InsertPointTy ip, llvm::Value *iv) {
     // Make sure further conversions know about the induction variable.
     moduleTranslation.mapValue(
-        loop.getRegion().front().getArgument(loopInfos.size()), iv);
+        loopOp.getRegion().front().getArgument(loopInfos.size()), iv);
 
     // Capture the body insertion point for use in nested loops. BodyIP of the
     // CanonicalLoopInfo always points to the beginning of the entry block of
     // the body.
     bodyInsertPoints.push_back(ip);
 
-    if (loopInfos.size() != loop.getNumLoops() - 1)
+    if (loopInfos.size() != loopOp.getNumLoops() - 1)
      return;
 
     // Convert the body of the loop.
     builder.restoreIP(ip);
-    convertOmpOpRegions(loop.getRegion(), "omp.wsloop.region", builder,
+    convertOmpOpRegions(loopOp.getRegion(), "omp.wsloop.region", builder,
                         moduleTranslation, bodyGenStatus);
   };
 
   // Delegate actual loop construction to the OpenMP IRBuilder.
-  // TODO: this currently assumes Wsloop is semantically similar to SCF loop,
-  // i.e. it has a positive step, uses signed integer semantics. Reconsider
-  // this code when Wsloop clearly supports more cases.
+  // TODO: this currently assumes omp.loop_nest is semantically similar to SCF
+  // loop, i.e. it has a positive step, uses signed integer semantics.
+  // Reconsider this code when the nested loop operation clearly supports more
+  // cases.
   llvm::OpenMPIRBuilder *ompBuilder = moduleTranslation.getOpenMPBuilder();
-  for (unsigned i = 0, e = loop.getNumLoops(); i < e; ++i) {
+  for (unsigned i = 0, e = loopOp.getNumLoops(); i < e; ++i) {
     llvm::Value *lowerBound =
-        moduleTranslation.lookupValue(loop.getLowerBound()[i]);
+        moduleTranslation.lookupValue(loopOp.getLowerBound()[i]);
     llvm::Value *upperBound =
-        moduleTranslation.lookupValue(loop.getUpperBound()[i]);
-    llvm::Value *step = moduleTranslation.lookupValue(loop.getStep()[i]);
+        moduleTranslation.lookupValue(loopOp.getUpperBound()[i]);
+    llvm::Value *step = moduleTranslation.lookupValue(loopOp.getStep()[i]);
 
     // Make sure loop trip count are emitted in the preheader of the outermost
     // loop at the latest so that they are all available for the new collapsed
@@ -1047,7 +1049,7 @@ convertOmpWsloop(Operation &opInst, llvm::IRBuilderBase &builder,
     }
     loopInfos.push_back(ompBuilder->createCanonicalLoop(
         loc, bodyGen, lowerBound, upperBound, step,
-        /*IsSigned=*/true, loop.getInclusive(), computeIP));
+        /*IsSigned=*/true, loopOp.getInclusive(), computeIP));
 
     if (failed(bodyGenStatus))
       return failure();
@@ -1062,13 +1064,13 @@ convertOmpWsloop(Operation &opInst, llvm::IRBuilderBase &builder,
   allocaIP = findAllocaInsertPoint(builder, moduleTranslation);
 
   // TODO: Handle doacross loops when the ordered clause has a parameter.
-  bool isOrdered = loop.getOrderedVal().has_value();
+  bool isOrdered = wsloopOp.getOrderedVal().has_value();
   std::optional<omp::ScheduleModifier> scheduleModifier =
-      loop.getScheduleModifier();
-  bool isSimd = loop.getSimdModifier();
+      wsloopOp.getScheduleModifier();
+  bool isSimd = wsloopOp.getSimdModifier();
 
   ompBuilder->applyWorkshareLoop(
-      ompLoc.DL, loopInfo, allocaIP, !loop.getNowait(),
+      ompLoc.DL, loopInfo, allocaIP, !wsloopOp.getNowait(),
      convertToScheduleKind(schedule), chunk, isSimd,
       scheduleModifier == omp::ScheduleModifier::monotonic,
       scheduleModifier == omp::ScheduleModifier::nonmonotonic, isOrdered);
@@ -1080,7 +1082,7 @@ convertOmpWsloop(Operation &opInst, llvm::IRBuilderBase &builder,
   builder.restoreIP(afterIP);
 
   // Process the reductions if required.
-  if (loop.getNumReductionVars() == 0)
+  if (wsloopOp.getNumReductionVars() == 0)
     return success();
 
   // Create the reduction generators. We need to own them here because
@@ -1088,7 +1090,7 @@ convertOmpWsloop(Operation &opInst, llvm::IRBuilderBase &builder,
   SmallVector<OwningReductionGen> owningReductionGens;
   SmallVector<OwningAtomicReductionGen> owningAtomicReductionGens;
   SmallVector<llvm::OpenMPIRBuilder::ReductionInfo> reductionInfos;
-  collectReductionInfo(loop, builder, moduleTranslation, reductionDecls,
+  collectReductionInfo(wsloopOp, builder, moduleTranslation, reductionDecls,
                       owningReductionGens, owningAtomicReductionGens,
                       privateReductionVariables, reductionInfos);
 
@@ -1099,9 +1101,9 @@ convertOmpWsloop(Operation &opInst, llvm::IRBuilderBase &builder,
   builder.SetInsertPoint(tempTerminator);
   llvm::OpenMPIRBuilder::InsertPointTy contInsertPoint =
       ompBuilder->createReductions(builder.saveIP(), allocaIP, reductionInfos,
-                                   loop.getNowait(), isByRef);
+                                   wsloopOp.getNowait(), isByRef);
   if (!contInsertPoint.getBlock())
-    return loop->emitOpError() << "failed to convert reductions";
+    return wsloopOp->emitOpError() << "failed to convert reductions";
   auto nextInsertionPoint =
       ompBuilder->createBarrier(contInsertPoint, llvm::omp::OMPD_for);
   tempTerminator->eraseFromParent();
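[Review note] The pattern now used throughout the LLVM IR translation, and expected of future loop-wrapper consumers, is to reach the wrapped loop through the `LoopWrapperInterface` accessor. Minimal sketch of the lookup (error handling omitted):

```cpp
auto wsloopOp = cast<omp::WsloopOp>(opInst);
// Clauses (schedule, nowait, ordered, reductions, ...) stay on the wrapper;
// bounds, steps and induction variables come from the wrapped loop.
auto loopOp = cast<omp::LoopNestOp>(wsloopOp.getWrappedLoop());
llvm::Value *step = moduleTranslation.lookupValue(loopOp.getStep()[0]);
```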
diff --git a/mlir/test/CAPI/execution_engine.c b/mlir/test/CAPI/execution_engine.c
index 38a8fb8c3e213..81ff8477ffd7b 100644
--- a/mlir/test/CAPI/execution_engine.c
+++ b/mlir/test/CAPI/execution_engine.c
@@ -99,8 +99,11 @@ void testOmpCreation(void) {
 "  %1 = arith.constant 1 : i32                                     \n"
 "  %2 = arith.constant 2 : i32                                     \n"
 "  omp.parallel {                                                  \n"
-"    omp.wsloop for (%3) : i32 = (%0) to (%2) step (%1) {          \n"
-"      omp.yield                                                   \n"
+"    omp.wsloop {                                                  \n"
+"      omp.loop_nest (%3) : i32 = (%0) to (%2) step (%1) {         \n"
+"        omp.yield                                                 \n"
+"      }                                                           \n"
+"      omp.terminator                                              \n"
 "    }                                                             \n"
 "    omp.terminator                                                \n"
 "  }                                                               \n"
diff --git a/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir b/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
index 9f45d139b81f2..3aeb9e70522d5 100644
--- a/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
+++ b/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
@@ -71,15 +71,18 @@ func.func @branch_loop() {
 func.func @wsloop(%arg0: index, %arg1: index, %arg2: index, %arg3: index, %arg4: index, %arg5: index) {
   // CHECK: omp.parallel
   omp.parallel {
-    // CHECK: omp.wsloop for (%[[ARG6:.*]], %[[ARG7:.*]]) : i64 = (%[[ARG0]], %[[ARG1]]) to (%[[ARG2]], %[[ARG3]]) step (%[[ARG4]], %[[ARG5]]) {
-    "omp.wsloop"(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5) ({
-    ^bb0(%arg6: index, %arg7: index):
-      // CHECK-DAG: %[[CAST_ARG6:.*]] = builtin.unrealized_conversion_cast %[[ARG6]] : i64 to index
-      // CHECK-DAG: %[[CAST_ARG7:.*]] = builtin.unrealized_conversion_cast %[[ARG7]] : i64 to index
-      // CHECK: "test.payload"(%[[CAST_ARG6]], %[[CAST_ARG7]]) : (index, index) -> ()
-      "test.payload"(%arg6, %arg7) : (index, index) -> ()
-      omp.yield
-    }) {operandSegmentSizes = array<i32: 2, 2, 2, 0, 0, 0, 0>} : (index, index, index, index, index, index) -> ()
+    // CHECK: omp.wsloop {
+    "omp.wsloop"() ({
+      // CHECK: omp.loop_nest (%[[ARG6:.*]], %[[ARG7:.*]]) : i64 = (%[[ARG0]], %[[ARG1]]) to (%[[ARG2]], %[[ARG3]]) step (%[[ARG4]], %[[ARG5]]) {
+      omp.loop_nest (%arg6, %arg7) : index = (%arg0, %arg1) to (%arg2, %arg3) step (%arg4, %arg5) {
+        // CHECK-DAG: %[[CAST_ARG6:.*]] = builtin.unrealized_conversion_cast %[[ARG6]] : i64 to index
+        // CHECK-DAG: %[[CAST_ARG7:.*]] = builtin.unrealized_conversion_cast %[[ARG7]] : i64 to index
+        // CHECK: "test.payload"(%[[CAST_ARG6]], %[[CAST_ARG7]]) : (index, index) -> ()
+        "test.payload"(%arg6, %arg7) : (index, index) -> ()
+        omp.yield
+      }
+      omp.terminator
+    }) : () -> ()
     omp.terminator
   }
   return
@@ -323,12 +326,14 @@ llvm.func @_QPsb() {
 // CHECK-LABEL: @_QPsimple_reduction
 // CHECK: %[[RED_ACCUMULATOR:.*]] = llvm.alloca %{{.*}} x i32 {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"} : (i64) -> !llvm.ptr
 // CHECK: omp.parallel
-// CHECK: omp.wsloop reduction(@eqv_reduction %{{.+}} -> %[[PRV:.+]] : !llvm.ptr) for
-// CHECK: %[[LPRV:.+]] = llvm.load %[[PRV]] : !llvm.ptr -> i32
-// CHECK: %[[CMP:.+]] = llvm.icmp "eq" %{{.*}}, %[[LPRV]] : i32
-// CHECK: %[[ZEXT:.+]] = llvm.zext %[[CMP]] : i1 to i32
-// CHECK: llvm.store %[[ZEXT]], %[[PRV]] : i32, !llvm.ptr
-// CHECK: omp.yield
+// CHECK: omp.wsloop reduction(@eqv_reduction %{{.+}} -> %[[PRV:.+]] : !llvm.ptr)
+// CHECK-NEXT: omp.loop_nest {{.*}}{
+// CHECK: %[[LPRV:.+]] = llvm.load %[[PRV]] : !llvm.ptr -> i32
+// CHECK: %[[CMP:.+]] = llvm.icmp "eq" %{{.*}}, %[[LPRV]] : i32
+// CHECK: %[[ZEXT:.+]] = llvm.zext %[[CMP]] : i1 to i32
+// CHECK: llvm.store %[[ZEXT]], %[[PRV]] : i32, !llvm.ptr
+// CHECK: omp.yield
+// CHECK: omp.terminator
 // CHECK: omp.terminator
 // CHECK: llvm.return
 
@@ -354,20 +359,23 @@ llvm.func @_QPsimple_reduction(%arg0: !llvm.ptr {fir.bindc_name = "y"}) {
   %4 = llvm.alloca %3 x i32 {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"} : (i64) -> !llvm.ptr
   %5 = llvm.zext %2 : i1 to i32
   llvm.store %5, %4 : i32, !llvm.ptr
-  omp.parallel   {
+  omp.parallel {
     %6 = llvm.alloca %3 x i32 {adapt.valuebyref, in_type = i32, operandSegmentSizes = array<i32: 0, 0>, pinned} : (i64) -> !llvm.ptr
-    omp.wsloop reduction(@eqv_reduction %4 -> %prv : !llvm.ptr) for (%arg1) : i32 = (%1) to (%0) inclusive step (%1) {
-      llvm.store %arg1, %6 : i32, !llvm.ptr
-      %7 = llvm.load %6 : !llvm.ptr -> i32
-      %8 = llvm.sext %7 : i32 to i64
-      %9 = llvm.sub %8, %3 : i64
-      %10 = llvm.getelementptr %arg0[0, %9] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.array<100 x i32>
-      %11 = llvm.load %10 : !llvm.ptr -> i32
-      %12 = llvm.load %prv : !llvm.ptr -> i32
-      %13 = llvm.icmp "eq" %11, %12 : i32
-      %14 = llvm.zext %13 : i1 to i32
-      llvm.store %14, %prv : i32, !llvm.ptr
-      omp.yield
+    omp.wsloop reduction(@eqv_reduction %4 -> %prv : !llvm.ptr) {
+      omp.loop_nest (%arg1) : i32 = (%1) to (%0) inclusive step (%1) {
+        llvm.store %arg1, %6 : i32, !llvm.ptr
+        %7 = llvm.load %6 : !llvm.ptr -> i32
+        %8 = llvm.sext %7 : i32 to i64
+        %9 = llvm.sub %8, %3 : i64
+        %10 = llvm.getelementptr %arg0[0, %9] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.array<100 x i32>
+        %11 = llvm.load %10 : !llvm.ptr -> i32
+        %12 = llvm.load %prv : !llvm.ptr -> i32
+        %13 = llvm.icmp "eq" %11, %12 : i32
+        %14 = llvm.zext %13 : i1 to i32
+        llvm.store %14, %prv : i32, !llvm.ptr
+        omp.yield
+      }
+      omp.terminator
     }
     omp.terminator
   }
diff --git a/mlir/test/Conversion/SCFToOpenMP/reductions.mlir b/mlir/test/Conversion/SCFToOpenMP/reductions.mlir
index 3b6c145d62f1a..fc6d56559c261 100644
--- a/mlir/test/Conversion/SCFToOpenMP/reductions.mlir
+++ b/mlir/test/Conversion/SCFToOpenMP/reductions.mlir
@@ -28,6 +28,7 @@ func.func @reduction1(%arg0 : index, %arg1 : index, %arg2 : index,
   // CHECK: omp.parallel
   // CHECK: omp.wsloop
   // CHECK-SAME: reduction(@[[$REDF]] %[[BUF]] -> %[[PVT_BUF:[a-z0-9]+]]
+  // CHECK: omp.loop_nest
   // CHECK: memref.alloca_scope
   scf.parallel (%i0, %i1) = (%arg0, %arg1) to (%arg2, %arg3)
                 step (%arg4, %step) init (%zero) -> (f32) {
@@ -43,6 +44,7 @@ func.func @reduction1(%arg0 : index, %arg1 : index, %arg2 : index,
     }
     // CHECK: omp.yield
   }
+  // CHECK: omp.terminator
   // CHECK: omp.terminator
   // CHECK: llvm.load %[[BUF]]
   return
@@ -107,6 +109,7 @@ func.func @reduction_muli(%arg0 : index, %arg1 : index, %arg2 : index,
   %one = arith.constant 1 : i32
   // CHECK: %[[RED_VAR:.*]] = llvm.alloca %{{.*}} x i32 : (i64) -> !llvm.ptr
   // CHECK: omp.wsloop reduction(@[[$REDI]] %[[RED_VAR]] -> %[[RED_PVT_VAR:.*]] : !llvm.ptr)
+  // CHECK: omp.loop_nest
   scf.parallel (%i0, %i1) = (%arg0, %arg1) to (%arg2, %arg3)
                 step (%arg4, %step) init (%one) -> (i32) {
     // CHECK: %[[C2:.*]] = arith.constant 2 : i32
@@ -208,6 +211,7 @@ func.func @reduction4(%arg0 : index, %arg1 : index, %arg2 : index,
   // CHECK: omp.wsloop
   // CHECK-SAME: reduction(@[[$REDF1]] %[[BUF1]] -> %[[PVT_BUF1:[a-z0-9]+]]
   // CHECK-SAME: @[[$REDF2]] %[[BUF2]] -> %[[PVT_BUF2:[a-z0-9]+]]
+  // CHECK: omp.loop_nest
   // CHECK: memref.alloca_scope
   %res:2 = scf.parallel (%i0, %i1) = (%arg0, %arg1) to (%arg2, %arg3)
                 step (%arg4, %step) init (%zero, %ione) -> (f32, i64) {
@@ -236,6 +240,7 @@ func.func @reduction4(%arg0 : index, %arg1 : index, %arg2 : index,
     }
     // CHECK: omp.yield
   }
+  // CHECK: omp.terminator
   // CHECK: omp.terminator
   // CHECK: %[[RES1:.*]] = llvm.load %[[BUF1]] : !llvm.ptr -> f32
   // CHECK: %[[RES2:.*]] = llvm.load %[[BUF2]] : !llvm.ptr -> i64
diff --git a/mlir/test/Conversion/SCFToOpenMP/scf-to-openmp.mlir b/mlir/test/Conversion/SCFToOpenMP/scf-to-openmp.mlir
index acd2690c56e2e..b2f19d294cb5f 100644
--- a/mlir/test/Conversion/SCFToOpenMP/scf-to-openmp.mlir
+++ b/mlir/test/Conversion/SCFToOpenMP/scf-to-openmp.mlir
@@ -2,10 +2,11 @@
 
 // CHECK-LABEL: @parallel
 func.func @parallel(%arg0: index, %arg1: index, %arg2: index,
-          %arg3: index, %arg4: index, %arg5: index) {
+                    %arg3: index, %arg4: index, %arg5: index) {
   // CHECK: %[[FOUR:.+]] = llvm.mlir.constant(4 : i32) : i32
   // CHECK: omp.parallel num_threads(%[[FOUR]] : i32) {
-  // CHECK: omp.wsloop for (%[[LVAR1:.*]], %[[LVAR2:.*]]) : index = (%arg0, %arg1) to (%arg2, %arg3) step (%arg4, %arg5) {
+  // CHECK: omp.wsloop {
+  // CHECK: omp.loop_nest (%[[LVAR1:.*]], %[[LVAR2:.*]]) : index = (%arg0, %arg1) to (%arg2, %arg3) step (%arg4, %arg5) {
   // CHECK: memref.alloca_scope
   scf.parallel (%i, %j) = (%arg0, %arg1) to (%arg2, %arg3) step (%arg4, %arg5) {
     // CHECK: "test.payload"(%[[LVAR1]], %[[LVAR2]]) : (index, index) -> ()
@@ -13,6 +14,8 @@ func.func @parallel(%arg0: index, %arg1: index, %arg2: index,
     // CHECK: omp.yield
     // CHECK: }
   }
+  // CHECK: omp.terminator
+  // CHECK: }
   // CHECK: omp.terminator
   // CHECK: }
   return
@@ -23,20 +26,26 @@ func.func @nested_loops(%arg0: index, %arg1: index, %arg2: index,
                         %arg3: index, %arg4: index, %arg5: index) {
   // CHECK: %[[FOUR:.+]] = llvm.mlir.constant(4 : i32) : i32
   // CHECK: omp.parallel num_threads(%[[FOUR]] : i32) {
-  // CHECK: omp.wsloop for (%[[LVAR_OUT1:.*]]) : index = (%arg0) to (%arg2) step (%arg4) {
-  // CHECK: memref.alloca_scope
+  // CHECK: omp.wsloop {
+  // CHECK: omp.loop_nest (%[[LVAR_OUT1:.*]]) : index = (%arg0) to (%arg2) step (%arg4) {
+  // CHECK: memref.alloca_scope
   scf.parallel (%i) = (%arg0) to (%arg2) step (%arg4) {
     // CHECK: omp.parallel
-    // CHECK: omp.wsloop for (%[[LVAR_IN1:.*]]) : index = (%arg1) to (%arg3) step (%arg5) {
+    // CHECK: omp.wsloop {
+    // CHECK: omp.loop_nest (%[[LVAR_IN1:.*]]) : index = (%arg1) to (%arg3) step (%arg5) {
     // CHECK: memref.alloca_scope
     scf.parallel (%j) = (%arg1) to (%arg3) step (%arg5) {
       // CHECK: "test.payload"(%[[LVAR_OUT1]], %[[LVAR_IN1]]) : (index, index) -> ()
       "test.payload"(%i, %j) : (index, index) -> ()
      // CHECK: }
    }
-    // CHECK: omp.yield
+    // CHECK: omp.yield
+    // CHECK: }
+    // CHECK: omp.terminator
+    // CHECK: }
   }
+  // CHECK: omp.terminator
+  // CHECK: }
   // CHECK: omp.terminator
   // CHECK: }
   return
@@ -47,7 +56,8 @@ func.func @adjacent_loops(%arg0: index, %arg1: index, %arg2: index,
                           %arg3: index, %arg4: index, %arg5: index) {
   // CHECK: %[[FOUR:.+]] = llvm.mlir.constant(4 : i32) : i32
   // CHECK: omp.parallel num_threads(%[[FOUR]] : i32) {
-  // CHECK: omp.wsloop for (%[[LVAR_AL1:.*]]) : index = (%arg0) to (%arg2) step (%arg4) {
+  // CHECK: omp.wsloop {
+  // CHECK: omp.loop_nest (%[[LVAR_AL1:.*]]) : index = (%arg0) to (%arg2) step (%arg4) {
   // CHECK: memref.alloca_scope
   scf.parallel (%i) = (%arg0) to (%arg2) step (%arg4) {
     // CHECK: "test.payload1"(%[[LVAR_AL1]]) : (index) -> ()
@@ -55,12 +65,15 @@ func.func @adjacent_loops(%arg0: index, %arg1: index, %arg2: index,
     // CHECK: omp.yield
     // CHECK: }
   }
+  // CHECK: omp.terminator
+  // CHECK: }
   // CHECK: omp.terminator
   // CHECK: }
 
   // CHECK: %[[FOUR:.+]] = llvm.mlir.constant(4 : i32) : i32
   // CHECK: omp.parallel num_threads(%[[FOUR]] : i32) {
-  // CHECK: omp.wsloop for (%[[LVAR_AL2:.*]]) : index = (%arg1) to (%arg3) step (%arg5) {
+  // CHECK: omp.wsloop {
+  // CHECK: omp.loop_nest (%[[LVAR_AL2:.*]]) : index = (%arg1) to (%arg3) step (%arg5) {
   // CHECK: memref.alloca_scope
   scf.parallel (%j) = (%arg1) to (%arg3) step (%arg5) {
     // CHECK: "test.payload2"(%[[LVAR_AL2]]) : (index) -> ()
@@ -68,6 +81,8 @@ func.func @adjacent_loops(%arg0: index, %arg1: index, %arg2: index,
     // CHECK: omp.yield
     // CHECK: }
   }
+  // CHECK: omp.terminator
+  // CHECK: }
   // CHECK: omp.terminator
   // CHECK: }
   return
diff --git a/mlir/test/Dialect/LLVMIR/legalize-for-export.mlir b/mlir/test/Dialect/LLVMIR/legalize-for-export.mlir
index 37720e98d92a9..b1b06740f1944 100644
--- a/mlir/test/Dialect/LLVMIR/legalize-for-export.mlir
+++ b/mlir/test/Dialect/LLVMIR/legalize-for-export.mlir
@@ -32,14 +32,17 @@ llvm.func @repeated_successor_no_args(%arg0: i1) {
 // CHECK: @repeated_successor_openmp
 llvm.func @repeated_successor_openmp(%arg0: i64, %arg1: i64, %arg2: i64, %arg3: i1) {
-  omp.wsloop for (%arg4) : i64 = (%arg0) to (%arg1) step (%arg2) {
-    // CHECK: llvm.cond_br %{{.*}}, ^[[BB1:.*]]({{.*}}), ^[[BB2:.*]]({{.*}})
-    llvm.cond_br %arg3, ^bb1(%arg0 : i64), ^bb1(%arg1 : i64)
-  // CHECK: ^[[BB1]]
-  ^bb1(%0: i64):  // 2 preds: ^bb0, ^bb0
-    omp.yield
-  // CHECK: ^[[BB2]](%[[ARG:.*]]: i64):
-  // CHECK: llvm.br ^[[BB1]](%[[ARG]] : i64)
+  omp.wsloop {
+    omp.loop_nest (%arg4) : i64 = (%arg0) to (%arg1) step (%arg2) {
+      // CHECK: llvm.cond_br %{{.*}}, ^[[BB1:.*]]({{.*}}), ^[[BB2:.*]]({{.*}})
+      llvm.cond_br %arg3, ^bb1(%arg0 : i64), ^bb1(%arg1 : i64)
+    // CHECK: ^[[BB1]]
+    ^bb1(%0: i64):  // 2 preds: ^bb0, ^bb0
+      omp.yield
+    // CHECK: ^[[BB2]](%[[ARG:.*]]: i64):
+    // CHECK: llvm.br ^[[BB1]](%[[ARG]] : i64)
+    }
+    omp.terminator
   }
   llvm.return
 }
diff --git a/mlir/test/Dialect/OpenMP/invalid.mlir b/mlir/test/Dialect/OpenMP/invalid.mlir
index 2f24dce4233e4..e329b3010017c 100644
--- a/mlir/test/Dialect/OpenMP/invalid.mlir
+++ b/mlir/test/Dialect/OpenMP/invalid.mlir
@@ -149,50 +149,74 @@ func.func @invalid_parent(%lb : index, %ub : index, %step : index) {
 // -----
 
 func.func @invalid_wrapper(%lb : index, %ub : index, %step : index) {
-  // TODO Remove induction variables from omp.wsloop.
-  omp.wsloop for (%iv) : index = (%lb) to (%ub) step (%step) {
+  omp.parallel {
     %0 = arith.constant 0 : i32
     // expected-error@+1 {{op expects parent op to be a valid loop wrapper}}
     omp.loop_nest (%iv2) : index = (%lb) to (%ub) step (%step) {
       omp.yield
     }
-    omp.yield
+    omp.terminator
   }
 }
 
 // -----
 
 func.func @type_mismatch(%lb : index, %ub : index, %step : index) {
-  // TODO Remove induction variables from omp.wsloop.
-  omp.wsloop for (%iv) : index = (%lb) to (%ub) step (%step) {
+  omp.wsloop {
    // expected-error@+1 {{range argument type does not match corresponding IV type}}
    "omp.loop_nest" (%lb, %ub, %step) ({
    ^bb0(%iv2: i32):
      omp.yield
    }) : (index, index, index) -> ()
-    omp.yield
+    omp.terminator
  }
}

// -----

func.func @iv_number_mismatch(%lb : index, %ub : index, %step : index) {
-  // TODO Remove induction variables from omp.wsloop.
-  omp.wsloop for (%iv) : index = (%lb) to (%ub) step (%step) {
+  omp.wsloop {
    // expected-error@+1 {{number of range arguments and IVs do not match}}
    "omp.loop_nest" (%lb, %ub, %step) ({
    ^bb0(%iv1 : index, %iv2 : index):
      omp.yield
    }) : (index, index, index) -> ()
-    omp.yield
+    omp.terminator
  }
}

+// -----
+
+func.func @no_wrapper(%lb : index, %ub : index, %step : index) {
+  // expected-error @below {{op must be a loop wrapper}}
+  omp.wsloop {
+    %0 = arith.constant 0 : i32
+    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
+      omp.yield
+    }
+    omp.terminator
+  }
+}
+
+// -----
+
+func.func @invalid_nested_wrapper(%lb : index, %ub : index, %step : index) {
+  // expected-error @below {{only supported nested wrapper is 'omp.simd'}}
+  omp.wsloop {
+    omp.distribute {
+      omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
+        omp.yield
+      }
+      omp.terminator
+    }
+    omp.terminator
+  }
+}

// -----

func.func @no_loops(%lb : index, %ub : index, %step : index) {
-  // TODO Remove induction variables from omp.wsloop.
-  omp.wsloop for (%iv) : index = (%lb) to (%ub) step (%step) {
+  omp.wsloop {
    // expected-error@+1 {{op must represent at least one loop}}
    "omp.loop_nest" () ({
    ^bb0():
@@ -205,10 +229,12 @@ func.func @no_loops(%lb : index, %ub : index, %step : index) {
 // -----
 
 func.func @inclusive_not_a_clause(%lb : index, %ub : index, %step : index) {
-  // expected-error @below {{expected 'for'}}
-  omp.wsloop nowait inclusive
-  for (%iv) : index = (%lb) to (%ub) step (%step) {
-    omp.yield
+  // expected-error @below {{expected '{'}}
+  omp.wsloop nowait inclusive {
+    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
+      omp.yield
+    }
+    omp.terminator
  }
}

// -----

func.func @order_value(%lb : index, %ub : index, %step : index) {
  // expected-error @below {{invalid clause value: 'default'}}
-  omp.wsloop order(default)
-  for (%iv) : index = (%lb) to (%ub) step (%step) {
-    omp.yield
+  omp.wsloop order(default) {
+    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
+      omp.yield
+    }
+    omp.terminator
  }
}

// -----

func.func @if_not_allowed(%lb : index, %ub : index, %step : index, %bool_var : i1) {
-  // expected-error @below {{expected 'for'}}
-  omp.wsloop if(%bool_var: i1)
-  for (%iv) : index = (%lb) to (%ub) step (%step) {
-    omp.yield
+  // expected-error @below {{expected '{'}}
+  omp.wsloop if(%bool_var: i1) {
+    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
+      omp.yield
+    }
+    omp.terminator
  }
}

// -----

func.func @num_threads_not_allowed(%lb : index, %ub : index, %step : index, %int_var : i32) {
-  // expected-error @below {{expected 'for'}}
-  omp.wsloop num_threads(%int_var: i32)
-  for (%iv) : index = (%lb) to (%ub) step (%step) {
-    omp.yield
+  // expected-error @below {{expected '{'}}
+  omp.wsloop num_threads(%int_var: i32) {
+    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
+      omp.yield
+    }
+    omp.terminator
  }
}

// -----

func.func @proc_bind_not_allowed(%lb : index, %ub : index, %step : index) {
-  // expected-error @below {{expected 'for'}}
-  omp.wsloop proc_bind(close)
-  for (%iv) : index = (%lb) to (%ub) step (%step) {
-    omp.yield
+  // expected-error @below {{expected '{'}}
+  omp.wsloop proc_bind(close) {
+    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
+      omp.yield
+    }
+    omp.terminator
  }
}

@@ -256,9 +290,11 @@

llvm.func @test_omp_wsloop_dynamic_bad_modifier(%lb : i64, %ub : i64, %step : i64) -> () {
  // expected-error @+1 {{unknown modifier type: ginandtonic}}
-  omp.wsloop schedule(dynamic, ginandtonic)
-  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-    omp.yield
+  omp.wsloop schedule(dynamic, ginandtonic) {
+    omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
+      omp.yield
+    }
+    omp.terminator
  }
  llvm.return
}

@@ -267,9 +303,11 @@ llvm.func @test_omp_wsloop_dynamic_bad_modifier(%lb : i64, %ub : i64, %step : i6

llvm.func @test_omp_wsloop_dynamic_many_modifier(%lb : i64, %ub : i64, %step : i64) -> () {
  // expected-error @+1 {{unexpected modifier(s)}}
-  omp.wsloop schedule(dynamic, monotonic, monotonic, monotonic)
-  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-    omp.yield
+  omp.wsloop schedule(dynamic, monotonic, monotonic, monotonic) {
+    omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
+      omp.yield
+    }
+    omp.terminator
  }
  llvm.return
}

@@ -278,9 +316,11 @@ llvm.func @test_omp_wsloop_dynamic_many_modifier(%lb : i64, %ub : i64, %step : i

llvm.func @test_omp_wsloop_dynamic_wrong_modifier(%lb : i64, %ub : i64, %step : i64) -> () {
  // expected-error @+1 {{incorrect modifier order}}
-  omp.wsloop schedule(dynamic, simd, monotonic)
-  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-    omp.yield
+  omp.wsloop schedule(dynamic, simd, monotonic) {
+    omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
+      omp.yield
+    }
+    omp.terminator
  }
  llvm.return
}

@@ -289,9 +329,11 @@ llvm.func @test_omp_wsloop_dynamic_wrong_modifier(%lb : i64, %ub : i64, %step :

llvm.func @test_omp_wsloop_dynamic_wrong_modifier2(%lb : i64, %ub : i64, %step : i64) -> () {
  // expected-error @+1 {{incorrect modifier order}}
-  omp.wsloop schedule(dynamic, monotonic, monotonic)
-  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-    omp.yield
+  omp.wsloop schedule(dynamic, monotonic, monotonic) {
+    omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
+      omp.yield
+    }
+    omp.terminator
  }
  llvm.return
}

@@ -300,9 +342,11 @@ llvm.func @test_omp_wsloop_dynamic_wrong_modifier2(%lb : i64, %ub : i64, %step :

llvm.func @test_omp_wsloop_dynamic_wrong_modifier3(%lb : i64, %ub : i64, %step : i64) -> () {
  // expected-error @+1 {{incorrect modifier order}}
-  omp.wsloop schedule(dynamic, simd, simd)
-  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-    omp.yield
+  omp.wsloop schedule(dynamic, simd, simd) {
+    omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
+      omp.yield
+    }
+    omp.terminator
  }
  llvm.return
}

@@ -601,11 +645,13 @@ func.func @foo(%lb : index, %ub : index, %step : index) {
  %1 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr

  // expected-error @below {{expected symbol reference @foo to point to a reduction declaration}}
-  omp.wsloop reduction(@foo %0 -> %prv : !llvm.ptr)
-  for (%iv) : index = (%lb) to (%ub) step (%step) {
-    %2 = arith.constant 2.0 : f32
-    omp.reduction %2, %1 : f32, !llvm.ptr
-    omp.yield
+  omp.wsloop reduction(@foo %0 -> %prv : !llvm.ptr) {
+    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
+      %2 = arith.constant 2.0 : f32
+      omp.reduction %2, %1 : f32, !llvm.ptr
+      omp.yield
+    }
+    omp.terminator
  }
  return
}

@@ -629,11 +675,13 @@ func.func @foo(%lb : index, %ub : index, %step : index) {
  %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr

  // expected-error @below {{accumulator variable used more than once}}
-  omp.wsloop reduction(@add_f32 %0 -> %prv : !llvm.ptr, @add_f32 %0 -> %prv1 : !llvm.ptr)
-  for (%iv) : index = (%lb) to (%ub) step (%step) {
-    %2 = arith.constant 2.0 : f32
-    omp.reduction %2, %0 : f32, !llvm.ptr
-    omp.yield
+  omp.wsloop reduction(@add_f32 %0 -> %prv : !llvm.ptr, @add_f32 %0 -> %prv1 : !llvm.ptr) {
+    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
+      %2 = arith.constant 2.0 : f32
+      omp.reduction %2, %0 : f32, !llvm.ptr
+      omp.yield
+    }
+    omp.terminator
  }
  return
}

@@ -662,11 +710,13 @@ func.func @foo(%lb : index, %ub : index, %step : index, %mem : memref<1xf32>) {
  %c1 = arith.constant 1 : i32

  // expected-error @below {{expected accumulator ('memref<1xf32>') to be the same type as reduction declaration ('!llvm.ptr')}}
-  omp.wsloop reduction(@add_f32 %mem -> %prv : memref<1xf32>)
-  for (%iv) : index = (%lb) to (%ub) step (%step) {
-    %2 = arith.constant 2.0 : f32
-    omp.reduction %2, %mem : f32, memref<1xf32>
-    omp.yield
+  omp.wsloop reduction(@add_f32 %mem -> %prv : memref<1xf32>) {
+    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
+      %2 = arith.constant 2.0 : f32
+      omp.reduction %2, %mem : f32, memref<1xf32>
+      omp.yield
+    }
+    omp.terminator
  }
  return
}

@@ -698,60 +748,112 @@ omp.critical.declare @mutex hint(invalid_hint)

// -----

-func.func @omp_ordered1(%arg1 : i32, %arg2 : i32, %arg3 : i32) -> () {
-  omp.wsloop ordered(1)
-  for (%0) : i32 = (%arg1) to (%arg2) step (%arg3) {
-    // expected-error @below {{ordered region must be closely nested inside a worksharing-loop region with an ordered clause without parameter present}}
-    omp.ordered.region {
-      omp.terminator
+func.func @omp_ordered_region1(%x : i32) -> () {
+  omp.distribute {
+    omp.loop_nest (%i) : i32 = (%x) to (%x) step (%x) {
+      // expected-error @below {{op must be nested inside of a worksharing, simd or worksharing simd loop}}
+      omp.ordered.region {
+        omp.terminator
+      }
+      omp.yield
    }
-    omp.yield
+    omp.terminator
  }
  return
}

// -----

-func.func @omp_ordered2(%arg1 : i32, %arg2 : i32, %arg3 : i32) -> () {
-  omp.wsloop for (%0) : i32 = (%arg1) to (%arg2) step (%arg3) {
-    // expected-error @below {{ordered region must be closely nested inside a worksharing-loop region with an ordered clause without parameter present}}
-    omp.ordered.region {
-      omp.terminator
+func.func @omp_ordered_region2(%x : i32) -> () {
+  omp.wsloop {
+    omp.loop_nest (%i) : i32 = (%x) to (%x) step (%x) {
+      // expected-error @below {{the enclosing worksharing-loop region must have an ordered clause}}
+      omp.ordered.region {
+        omp.terminator
+      }
+      omp.yield
    }
-    omp.yield
+    omp.terminator
  }
  return
}

// -----

-func.func @omp_ordered3(%vec0 : i64) -> () {
-  // expected-error @below {{ordered depend directive must be closely nested inside a worksharing-loop with ordered clause with parameter present}}
+func.func @omp_ordered_region3(%x : i32) -> () {
+  omp.wsloop ordered(1) {
+    omp.loop_nest (%i) : i32 = (%x) to (%x) step (%x) {
+      // expected-error @below {{the enclosing loop's ordered clause must not have a parameter present}}
+      omp.ordered.region {
+        omp.terminator
+      }
+      omp.yield
+    }
+    omp.terminator
+  }
+  return
+}
+
+// -----
+
+func.func @omp_ordered1(%vec0 : i64) -> () {
+  // expected-error @below {{op must be nested inside of a loop}}
  omp.ordered depend_type(dependsink) depend_vec(%vec0 : i64) {num_loops_val = 1 : i64}
  return
}

// -----

-func.func @omp_ordered4(%arg1 : i32, %arg2 : i32, %arg3 : i32, %vec0 : i64) -> () {
-  omp.wsloop ordered(0)
-  for (%0) : i32 = (%arg1) to (%arg2) step (%arg3) {
-    // expected-error @below {{ordered depend directive must be closely nested inside a worksharing-loop with ordered clause with parameter present}}
-    omp.ordered depend_type(dependsink) depend_vec(%vec0 : i64) {num_loops_val = 1 : i64}
+func.func @omp_ordered2(%arg1 : i32, %arg2 : i32, %arg3 : i32, %vec0 : i64) -> () {
+  omp.distribute {
+    omp.loop_nest (%0) : i32 = (%arg1) to (%arg2) step (%arg3) {
+      // expected-error @below {{op must be nested inside of a worksharing, simd or worksharing simd loop}}
+      omp.ordered depend_type(dependsink) depend_vec(%vec0 : i64) {num_loops_val = 1 : i64}
+      omp.yield
+    }
+    omp.terminator
+  }
+  return
+}

-    omp.yield
+// -----
+
+func.func @omp_ordered3(%arg1 : i32, %arg2 : i32, %arg3 : i32, %vec0 : i64) -> () {
+  omp.wsloop {
+    omp.loop_nest (%0) : i32 = (%arg1) to (%arg2) step (%arg3) {
+      // expected-error @below {{the enclosing worksharing-loop region must have an ordered clause}}
+      omp.ordered depend_type(dependsink) depend_vec(%vec0 : i64) {num_loops_val = 1 : i64}
+      omp.yield
+    }
+    omp.terminator
  }
  return
}

// -----

-func.func @omp_ordered5(%arg1 : i32, %arg2 : i32, %arg3 : i32, %vec0 : i64, %vec1 : i64) -> () {
-  omp.wsloop ordered(1)
-  for (%0) : i32 = (%arg1) to (%arg2) step (%arg3) {
-    // expected-error @below {{number of variables in depend clause does not match number of iteration variables in the doacross loop}}
-    omp.ordered depend_type(dependsource) depend_vec(%vec0, %vec1 : i64, i64) {num_loops_val = 2 : i64}
+func.func @omp_ordered4(%arg1 : i32, %arg2 : i32, %arg3 : i32, %vec0 : i64) -> () {
+  omp.wsloop ordered(0) {
+    omp.loop_nest (%0) : i32 = (%arg1) to (%arg2) step (%arg3) {
+      // expected-error @below {{the enclosing loop's ordered clause must have a parameter present}}
+      omp.ordered depend_type(dependsink) depend_vec(%vec0 : i64) {num_loops_val = 1 : i64}
+      omp.yield
+    }
+    omp.terminator
+  }
+  return
+}

-    omp.yield
+// -----
+
+func.func @omp_ordered5(%arg1 : i32, %arg2 : i32, %arg3 : i32, %vec0 : i64, %vec1 : i64) -> () {
+  omp.wsloop ordered(1) {
+    omp.loop_nest (%0) : i32 = (%arg1) to (%arg2) step (%arg3) {
+      // expected-error @below {{number of variables in depend clause does not match number of iteration variables in the doacross loop}}
+      omp.ordered depend_type(dependsource) depend_vec(%vec0, %vec1 : i64, i64) {num_loops_val = 2 : i64}
+      omp.yield
+    }
+    omp.terminator
  }
  return
}

@@ -1590,11 +1692,13 @@ func.func @omp_cancel2() {
// -----

func.func @omp_cancel3(%arg1 : i32, %arg2 : i32, %arg3 : i32) -> () {
-  omp.wsloop nowait
-  for (%0) : i32 = (%arg1) to (%arg2) step (%arg3) {
-    // expected-error @below {{A worksharing construct that is canceled must not have a nowait clause}}
-    omp.cancel cancellation_construct_type(loop)
-    // CHECK: omp.terminator
+  omp.wsloop nowait {
+    omp.loop_nest (%0) : i32 = (%arg1) to (%arg2) step (%arg3) {
+      // expected-error @below {{A worksharing construct that is canceled must not have a nowait clause}}
+      omp.cancel cancellation_construct_type(loop)
+      // CHECK: omp.yield
+      omp.yield
+    }
    omp.terminator
  }
  return
@@ -1603,11 +1707,13 @@ func.func @omp_cancel3(%arg1 : i32, %arg2 : i32, %arg3 : i32) -> () {
// -----

func.func @omp_cancel4(%arg1 : i32, %arg2 : i32, %arg3 : i32) -> () {
-  omp.wsloop ordered(1)
-  for (%0) : i32 = (%arg1) to (%arg2) step (%arg3) {
-    // expected-error @below {{A worksharing construct that is canceled must not have an ordered clause}}
-    omp.cancel cancellation_construct_type(loop)
-    // CHECK: omp.terminator
+  omp.wsloop ordered(1) {
+    omp.loop_nest (%0) : i32 = (%arg1) to (%arg2) step (%arg3) {
+      // expected-error @below {{A worksharing construct that is canceled must not have an ordered clause}}
+      omp.cancel cancellation_construct_type(loop)
+      // CHECK: omp.yield
+      omp.yield
+    }
    omp.terminator
  }
  return
@@ -2143,4 +2249,4 @@ func.func @undefined_privatizer(%arg0: !llvm.ptr) {
    omp.terminator
  }) : (!llvm.ptr) -> ()
  return
-}
+}
\ No newline at end of file
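[Review note] For contrast with the invalid_nested_wrapper case above, the one wrapper nesting `WsloopOp::verify` does accept is `omp.simd` directly under `omp.wsloop` (the DO/FOR SIMD composite); a sketch with placeholder bounds:

```mlir
omp.wsloop {
  omp.simd {
    omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) {
      omp.yield
    }
    omp.terminator
  }
  omp.terminator
}
```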
cancellation_construct_type(loop) - // CHECK: omp.terminator + omp.wsloop ordered(1) { + omp.loop_nest (%0) : i32 = (%arg1) to (%arg2) step (%arg3) { + // expected-error @below {{A worksharing construct that is canceled must not have an ordered clause}} + omp.cancel cancellation_construct_type(loop) + // CHECK: omp.yield + omp.yield + } omp.terminator } return @@ -2143,4 +2249,4 @@ func.func @undefined_privatizer(%arg0: !llvm.ptr) { omp.terminator }) : (!llvm.ptr) -> () return -} +} \ No newline at end of file diff --git a/mlir/test/Dialect/OpenMP/ops.mlir b/mlir/test/Dialect/OpenMP/ops.mlir index c10fc88211c36..a012588f0b552 100644 --- a/mlir/test/Dialect/OpenMP/ops.mlir +++ b/mlir/test/Dialect/OpenMP/ops.mlir @@ -90,10 +90,9 @@ func.func @omp_parallel(%data_var : memref, %if_cond : i1, %num_threads : i // CHECK-NEXT: omp.parallel omp.parallel { // CHECK-NEXT: omp.wsloop - // TODO Remove induction variables from omp.wsloop. - omp.wsloop for (%iv) : index = (%idx) to (%idx) step (%idx) { + omp.wsloop { // CHECK-NEXT: omp.loop_nest - omp.loop_nest (%iv2) : index = (%idx) to (%idx) step (%idx) { + omp.loop_nest (%iv) : index = (%idx) to (%idx) step (%idx) { omp.yield } omp.terminator @@ -153,49 +152,45 @@ func.func @omp_parallel_pretty(%data_var : memref, %if_cond : i1, %num_thre // CHECK-LABEL: omp_loop_nest func.func @omp_loop_nest(%lb : index, %ub : index, %step : index) -> () { - // TODO Remove induction variables from omp.wsloop. - omp.wsloop for (%iv) : index = (%lb) to (%ub) step (%step) { + omp.wsloop { // CHECK: omp.loop_nest // CHECK-SAME: (%{{.*}}) : index = // CHECK-SAME: (%{{.*}}) to (%{{.*}}) step (%{{.*}}) "omp.loop_nest" (%lb, %ub, %step) ({ - ^bb0(%iv2: index): + ^bb0(%iv: index): omp.yield }) : (index, index, index) -> () - omp.yield + omp.terminator } - // TODO Remove induction variables from omp.wsloop. - omp.wsloop for (%iv) : index = (%lb) to (%ub) step (%step) { + omp.wsloop { // CHECK: omp.loop_nest // CHECK-SAME: (%{{.*}}) : index = // CHECK-SAME: (%{{.*}}) to (%{{.*}}) inclusive step (%{{.*}}) "omp.loop_nest" (%lb, %ub, %step) ({ - ^bb0(%iv2: index): + ^bb0(%iv: index): omp.yield }) {inclusive} : (index, index, index) -> () - omp.yield + omp.terminator } - // TODO Remove induction variables from omp.wsloop. - omp.wsloop for (%iv) : index = (%lb) to (%ub) step (%step) { + omp.wsloop { // CHECK: omp.loop_nest // CHECK-SAME: (%{{.*}}, %{{.*}}) : index = // CHECK-SAME: (%{{.*}}, %{{.*}}) to (%{{.*}}, %{{.*}}) step (%{{.*}}, %{{.*}}) "omp.loop_nest" (%lb, %lb, %ub, %ub, %step, %step) ({ - ^bb0(%iv2: index, %iv3: index): + ^bb0(%iv: index, %iv3: index): omp.yield }) : (index, index, index, index, index, index) -> () - omp.yield + omp.terminator } - // TODO Remove induction variables from omp.wsloop. 
- omp.wsloop for (%iv) : index = (%lb) to (%ub) step (%step) { + omp.wsloop { // CHECK: omp.loop_nest // CHECK-SAME: (%{{.*}}) : index = // CHECK-SAME: (%{{.*}}) to (%{{.*}}) step (%{{.*}}) "omp.loop_nest" (%lb, %ub, %step) ({ - ^bb0(%iv2: index): + ^bb0(%iv: index): // CHECK: test.op1 "test.op1"(%lb) : (index) -> () // CHECK: test.op2 @@ -203,7 +198,7 @@ func.func @omp_loop_nest(%lb : index, %ub : index, %step : index) -> () { // CHECK: omp.yield omp.yield }) : (index, index, index) -> () - omp.yield + omp.terminator } return @@ -211,45 +206,41 @@ func.func @omp_loop_nest(%lb : index, %ub : index, %step : index) -> () { // CHECK-LABEL: omp_loop_nest_pretty func.func @omp_loop_nest_pretty(%lb : index, %ub : index, %step : index) -> () { - // TODO Remove induction variables from omp.wsloop. - omp.wsloop for (%iv) : index = (%lb) to (%ub) step (%step) { + omp.wsloop { // CHECK: omp.loop_nest // CHECK-SAME: (%{{.*}}) : index = // CHECK-SAME: (%{{.*}}) to (%{{.*}}) step (%{{.*}}) - omp.loop_nest (%iv2) : index = (%lb) to (%ub) step (%step) { + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { omp.yield } - omp.yield + omp.terminator } - // TODO Remove induction variables from omp.wsloop. - omp.wsloop for (%iv) : index = (%lb) to (%ub) step (%step) { + omp.wsloop { // CHECK: omp.loop_nest // CHECK-SAME: (%{{.*}}) : index = // CHECK-SAME: (%{{.*}}) to (%{{.*}}) inclusive step (%{{.*}}) - omp.loop_nest (%iv2) : index = (%lb) to (%ub) inclusive step (%step) { + omp.loop_nest (%iv) : index = (%lb) to (%ub) inclusive step (%step) { omp.yield } - omp.yield + omp.terminator } - // TODO Remove induction variables from omp.wsloop. - omp.wsloop for (%iv) : index = (%lb) to (%ub) step (%step) { + omp.wsloop { // CHECK: omp.loop_nest // CHECK-SAME: (%{{.*}}) : index = // CHECK-SAME: (%{{.*}}, %{{.*}}) to (%{{.*}}, %{{.*}}) step (%{{.*}}, %{{.*}}) - omp.loop_nest (%iv2, %iv3) : index = (%lb, %lb) to (%ub, %ub) step (%step, %step) { + omp.loop_nest (%iv1, %iv2) : index = (%lb, %lb) to (%ub, %ub) step (%step, %step) { omp.yield } - omp.yield + omp.terminator } - // TODO Remove induction variables from omp.wsloop. 
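[Editor's aside, not part of the patch: the `inclusive` keyword likewise migrates to `omp.loop_nest`. Without it the range is half-open (the upper bound is excluded); with it the upper bound is iterated as well, which is why the inclusive translation tests further below expect an upper bound of 32 where the exclusive variant expects 31. Sketch with illustrative operands:

```
omp.wsloop {
  // Iterates %lb, %lb + %step, ... up to and including %ub.
  omp.loop_nest (%i) : index = (%lb) to (%ub) inclusive step (%step) {
    omp.yield
  }
  omp.terminator
}
```
]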
- omp.wsloop for (%iv) : index = (%lb) to (%ub) step (%step) { + omp.wsloop { // CHECK: omp.loop_nest // CHECK-SAME: (%{{.*}}) : index = // CHECK-SAME: (%{{.*}}) to (%{{.*}}) step (%{{.*}}) - omp.loop_nest (%iv2) : index = (%lb) to (%ub) step (%step) { + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { // CHECK: test.op1 "test.op1"(%lb) : (index) -> () // CHECK: test.op2 @@ -257,201 +248,271 @@ func.func @omp_loop_nest_pretty(%lb : index, %ub : index, %step : index) -> () { // CHECK: omp.yield omp.yield } - omp.yield + omp.terminator } return } -// CHECK-LABEL: omp_wsloop -func.func @omp_wsloop(%lb : index, %ub : index, %step : index, %data_var : memref, %linear_var : i32, %chunk_var : i32) -> () { +// CHECK-LABEL: omp_loop_nest_pretty_multi_block +func.func @omp_loop_nest_pretty_multi_block(%lb : index, %ub : index, + %step : index, %data1 : memref, %data2 : memref) -> () { - // CHECK: omp.wsloop ordered(1) - // CHECK-SAME: for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) - "omp.wsloop" (%lb, %ub, %step) ({ - ^bb0(%iv: index): + omp.wsloop { + // CHECK: omp.loop_nest (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + %1 = "test.payload"(%iv) : (index) -> (i32) + cf.br ^bb1(%1: i32) + ^bb1(%arg: i32): + memref.store %arg, %data1[%iv] : memref omp.yield - }) {operandSegmentSizes = array, ordered_val = 1} : - (index, index, index) -> () + } + omp.terminator + } - // CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref) schedule(static) - // CHECK-SAME: for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) - "omp.wsloop" (%lb, %ub, %step, %data_var, %linear_var) ({ - ^bb0(%iv: index): + omp.wsloop { + // CHECK: omp.loop_nest (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + %c = "test.condition"(%iv) : (index) -> (i1) + %v1 = "test.payload"(%iv) : (index) -> (i32) + cf.cond_br %c, ^bb1(%v1: i32), ^bb2(%v1: i32) + ^bb1(%arg0: i32): + memref.store %arg0, %data1[%iv] : memref + cf.br ^bb3 + ^bb2(%arg1: i32): + memref.store %arg1, %data2[%iv] : memref + cf.br ^bb3 + ^bb3: omp.yield - }) {operandSegmentSizes = array, schedule_val = #omp} : - (index, index, index, memref, i32) -> () + } + omp.terminator + } - // CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref, %{{.*}} = %{{.*}} : memref) schedule(static) - // CHECK-SAME: for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) - "omp.wsloop" (%lb, %ub, %step, %data_var, %data_var, %linear_var, %linear_var) ({ - ^bb0(%iv: index): + omp.wsloop { + // CHECK: omp.loop_nest (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + %c = "test.condition"(%iv) : (index) -> (i1) + %v1 = "test.payload"(%iv) : (index) -> (i32) + cf.cond_br %c, ^bb1(%v1: i32), ^bb2(%v1: i32) + ^bb1(%arg0: i32): + memref.store %arg0, %data1[%iv] : memref + omp.yield + ^bb2(%arg1: i32): + memref.store %arg1, %data2[%iv] : memref omp.yield - }) {operandSegmentSizes = array, schedule_val = #omp} : - (index, index, index, memref, memref, i32, i32) -> () + } + omp.terminator + } - // CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref) schedule(dynamic = %{{.*}}) ordered(2) - // CHECK-SAME: for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) - "omp.wsloop" (%lb, %ub, %step, %data_var, %linear_var, %chunk_var) ({ - ^bb0(%iv: index): + return +} + +// CHECK-LABEL: omp_loop_nest_pretty_non_index +func.func 
@omp_loop_nest_pretty_non_index(%lb1 : i32, %ub1 : i32, %step1 : i32, + %lb2 : i64, %ub2 : i64, %step2 : i64, %data1 : memref, + %data2 : memref) -> () { + + omp.wsloop { + // CHECK: omp.loop_nest (%{{.*}}) : i32 = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) + omp.loop_nest (%iv1) : i32 = (%lb1) to (%ub1) step (%step1) { + %1 = "test.payload"(%iv1) : (i32) -> (index) + cf.br ^bb1(%1: index) + ^bb1(%arg1: index): + memref.store %iv1, %data1[%arg1] : memref omp.yield - }) {operandSegmentSizes = array, schedule_val = #omp, ordered_val = 2} : - (index, index, index, memref, i32, i32) -> () + } + omp.terminator + } - // CHECK: omp.wsloop schedule(auto) nowait - // CHECK-SAME: for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) - "omp.wsloop" (%lb, %ub, %step) ({ - ^bb0(%iv: index): + omp.wsloop { + // CHECK: omp.loop_nest (%{{.*}}) : i64 = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) + omp.loop_nest (%iv) : i64 = (%lb2) to (%ub2) step (%step2) { + %2 = "test.payload"(%iv) : (i64) -> (index) + cf.br ^bb1(%2: index) + ^bb1(%arg2: index): + memref.store %iv, %data2[%arg2] : memref omp.yield - }) {operandSegmentSizes = array, nowait, schedule_val = #omp} : - (index, index, index) -> () + } + omp.terminator + } return } -// CHECK-LABEL: omp_wsloop_pretty -func.func @omp_wsloop_pretty(%lb : index, %ub : index, %step : index, %data_var : memref, %linear_var : i32, %chunk_var : i32, %chunk_var2 : i16) -> () { +// CHECK-LABEL: omp_loop_nest_pretty_multiple +func.func @omp_loop_nest_pretty_multiple(%lb1 : i32, %ub1 : i32, %step1 : i32, + %lb2 : i32, %ub2 : i32, %step2 : i32, %data1 : memref) -> () { - // CHECK: omp.wsloop ordered(2) - // CHECK-SAME: for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) - omp.wsloop ordered(2) - for (%iv) : index = (%lb) to (%ub) step (%step) { - omp.yield + omp.wsloop { + // CHECK: omp.loop_nest (%{{.*}}, %{{.*}}) : i32 = (%{{.*}}, %{{.*}}) to (%{{.*}}, %{{.*}}) step (%{{.*}}, %{{.*}}) + omp.loop_nest (%iv1, %iv2) : i32 = (%lb1, %lb2) to (%ub1, %ub2) step (%step1, %step2) { + %1 = "test.payload"(%iv1) : (i32) -> (index) + %2 = "test.payload"(%iv2) : (i32) -> (index) + memref.store %iv1, %data1[%1] : memref + memref.store %iv2, %data1[%2] : memref + omp.yield + } + omp.terminator } - // CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref) schedule(static) - // CHECK-SAME: for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) - omp.wsloop schedule(static) linear(%data_var = %linear_var : memref) - for (%iv) : index = (%lb) to (%ub) step (%step) { - omp.yield - } + return +} - // CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref) schedule(static = %{{.*}} : i32) ordered(2) - // CHECK-SAME: for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) - omp.wsloop ordered(2) linear(%data_var = %linear_var : memref) schedule(static = %chunk_var : i32) - for (%iv) : index = (%lb) to (%ub) step (%step) { - omp.yield - } +// CHECK-LABEL: omp_wsloop +func.func @omp_wsloop(%lb : index, %ub : index, %step : index, %data_var : memref, %linear_var : i32, %chunk_var : i32) -> () { - // CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref) schedule(dynamic = %{{.*}} : i32, nonmonotonic) ordered(2) - // CHECK-SAME: for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) - omp.wsloop ordered(2) linear(%data_var = %linear_var : memref) schedule(dynamic = %chunk_var : i32, nonmonotonic) - for (%iv) : index = (%lb) to (%ub) step (%step) { - omp.yield - } + // CHECK: omp.wsloop ordered(1) { + // CHECK-NEXT: omp.loop_nest + "omp.wsloop" () ({ + omp.loop_nest (%iv) : index 
= (%lb) to (%ub) step (%step) { + omp.yield + } + omp.terminator + }) {operandSegmentSizes = array, ordered_val = 1} : + () -> () - // CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref) schedule(dynamic = %{{.*}} : i16, monotonic) ordered(2) - // CHECK-SAME: for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) - omp.wsloop ordered(2) linear(%data_var = %linear_var : memref) schedule(dynamic = %chunk_var2 : i16, monotonic) - for (%iv) : index = (%lb) to (%ub) step (%step) { - omp.yield - } + // CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref) schedule(static) { + // CHECK-NEXT: omp.loop_nest + "omp.wsloop" (%data_var, %linear_var) ({ + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + omp.yield + } + omp.terminator + }) {operandSegmentSizes = array, schedule_val = #omp} : + (memref, i32) -> () - // CHECK: omp.wsloop for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) - omp.wsloop for (%iv) : index = (%lb) to (%ub) step (%step) { - omp.yield - } + // CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref, %{{.*}} = %{{.*}} : memref) schedule(static) { + // CHECK-NEXT: omp.loop_nest + "omp.wsloop" (%data_var, %data_var, %linear_var, %linear_var) ({ + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + omp.yield + } + omp.terminator + }) {operandSegmentSizes = array, schedule_val = #omp} : + (memref, memref, i32, i32) -> () - // CHECK: omp.wsloop for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) inclusive step (%{{.*}}) - omp.wsloop for (%iv) : index = (%lb) to (%ub) inclusive step (%step) { - omp.yield - } + // CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref) schedule(dynamic = %{{.*}}) ordered(2) { + // CHECK-NEXT: omp.loop_nest + "omp.wsloop" (%data_var, %linear_var, %chunk_var) ({ + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + omp.yield + } + omp.terminator + }) {operandSegmentSizes = array, schedule_val = #omp, ordered_val = 2} : + (memref, i32, i32) -> () - // CHECK: omp.wsloop nowait - // CHECK-SAME: for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) - omp.wsloop nowait - for (%iv) : index = (%lb) to (%ub) step (%step) { - omp.yield - } + // CHECK: omp.wsloop schedule(auto) nowait { + // CHECK-NEXT: omp.loop_nest + "omp.wsloop" () ({ + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + omp.yield + } + omp.terminator + }) {operandSegmentSizes = array, nowait, schedule_val = #omp} : + () -> () - // CHECK: omp.wsloop nowait order(concurrent) - // CHECK-SAME: for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) - omp.wsloop order(concurrent) nowait - for (%iv) : index = (%lb) to (%ub) step (%step) { - omp.yield - } + // CHECK: omp.wsloop { + // CHECK-NEXT: omp.simd + // CHECK-NEXT: omp.loop_nest + "omp.wsloop" () ({ + omp.simd { + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + omp.yield + } + omp.terminator + } + omp.terminator + }) : () -> () return } -// CHECK-LABEL: omp_wsloop_pretty_multi_block -func.func @omp_wsloop_pretty_multi_block(%lb : index, %ub : index, %step : index, %data1 : memref, %data2 : memref) -> () { +// CHECK-LABEL: omp_wsloop_pretty +func.func @omp_wsloop_pretty(%lb : index, %ub : index, %step : index, %data_var : memref, %linear_var : i32, %chunk_var : i32, %chunk_var2 : i16) -> () { - // CHECK: omp.wsloop for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) - omp.wsloop for (%iv) : index = (%lb) to (%ub) step (%step) { - %1 = "test.payload"(%iv) : (index) -> (i32) - cf.br ^bb1(%1: i32) - ^bb1(%arg: i32): - memref.store %arg, %data1[%iv] : memref - 
omp.yield + // CHECK: omp.wsloop ordered(2) { + // CHECK-NEXT: omp.loop_nest + omp.wsloop ordered(2) { + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + omp.yield + } + omp.terminator } - // CHECK: omp.wsloop for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) - omp.wsloop for (%iv) : index = (%lb) to (%ub) step (%step) { - %c = "test.condition"(%iv) : (index) -> (i1) - %v1 = "test.payload"(%iv) : (index) -> (i32) - cf.cond_br %c, ^bb1(%v1: i32), ^bb2(%v1: i32) - ^bb1(%arg0: i32): - memref.store %arg0, %data1[%iv] : memref - cf.br ^bb3 - ^bb2(%arg1: i32): - memref.store %arg1, %data2[%iv] : memref - cf.br ^bb3 - ^bb3: - omp.yield + // CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref) schedule(static) { + // CHECK-NEXT: omp.loop_nest + omp.wsloop schedule(static) linear(%data_var = %linear_var : memref) { + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + omp.yield + } + omp.terminator } - // CHECK: omp.wsloop for (%{{.*}}) : index = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) - omp.wsloop for (%iv) : index = (%lb) to (%ub) step (%step) { - %c = "test.condition"(%iv) : (index) -> (i1) - %v1 = "test.payload"(%iv) : (index) -> (i32) - cf.cond_br %c, ^bb1(%v1: i32), ^bb2(%v1: i32) - ^bb1(%arg0: i32): - memref.store %arg0, %data1[%iv] : memref - omp.yield - ^bb2(%arg1: i32): - memref.store %arg1, %data2[%iv] : memref - omp.yield + // CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref) schedule(static = %{{.*}} : i32) ordered(2) { + // CHECK-NEXT: omp.loop_nest + omp.wsloop ordered(2) linear(%data_var = %linear_var : memref) schedule(static = %chunk_var : i32) { + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + omp.yield + } + omp.terminator } - return -} - -// CHECK-LABEL: omp_wsloop_pretty_non_index -func.func @omp_wsloop_pretty_non_index(%lb1 : i32, %ub1 : i32, %step1 : i32, %lb2 : i64, %ub2 : i64, %step2 : i64, - %data1 : memref, %data2 : memref) -> () { + // CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref) schedule(dynamic = %{{.*}} : i32, nonmonotonic) ordered(2) { + // CHECK-NEXT: omp.loop_nest + omp.wsloop ordered(2) linear(%data_var = %linear_var : memref) schedule(dynamic = %chunk_var : i32, nonmonotonic) { + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + omp.yield + } + omp.terminator + } - // CHECK: omp.wsloop for (%{{.*}}) : i32 = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) - omp.wsloop for (%iv1) : i32 = (%lb1) to (%ub1) step (%step1) { - %1 = "test.payload"(%iv1) : (i32) -> (index) - cf.br ^bb1(%1: index) - ^bb1(%arg1: index): - memref.store %iv1, %data1[%arg1] : memref - omp.yield + // CHECK: omp.wsloop linear(%{{.*}} = %{{.*}} : memref) schedule(dynamic = %{{.*}} : i16, monotonic) ordered(2) { + // CHECK-NEXT: omp.loop_nest + omp.wsloop ordered(2) linear(%data_var = %linear_var : memref) schedule(dynamic = %chunk_var2 : i16, monotonic) { + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + omp.yield + } + omp.terminator } - // CHECK: omp.wsloop for (%{{.*}}) : i64 = (%{{.*}}) to (%{{.*}}) step (%{{.*}}) - omp.wsloop for (%iv2) : i64 = (%lb2) to (%ub2) step (%step2) { - %2 = "test.payload"(%iv2) : (i64) -> (index) - cf.br ^bb1(%2: index) - ^bb1(%arg2: index): - memref.store %iv2, %data2[%arg2] : memref - omp.yield + // CHECK: omp.wsloop { + // CHECK-NEXT: omp.loop_nest + omp.wsloop { + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + omp.yield + } + omp.terminator } - return -} + // CHECK: omp.wsloop nowait { + // CHECK-NEXT: omp.loop_nest + omp.wsloop nowait { + omp.loop_nest (%iv) : index = 
(%lb) to (%ub) step (%step) { + omp.yield + } + omp.terminator + } -// CHECK-LABEL: omp_wsloop_pretty_multiple -func.func @omp_wsloop_pretty_multiple(%lb1 : i32, %ub1 : i32, %step1 : i32, %lb2 : i32, %ub2 : i32, %step2 : i32, %data1 : memref) -> () { + // CHECK: omp.wsloop nowait order(concurrent) { + // CHECK-NEXT: omp.loop_nest + omp.wsloop order(concurrent) nowait { + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + omp.yield + } + omp.terminator + } - // CHECK: omp.wsloop for (%{{.*}}, %{{.*}}) : i32 = (%{{.*}}, %{{.*}}) to (%{{.*}}, %{{.*}}) step (%{{.*}}, %{{.*}}) - omp.wsloop for (%iv1, %iv2) : i32 = (%lb1, %lb2) to (%ub1, %ub2) step (%step1, %step2) { - %1 = "test.payload"(%iv1) : (i32) -> (index) - %2 = "test.payload"(%iv2) : (i32) -> (index) - memref.store %iv1, %data1[%1] : memref - memref.store %iv2, %data1[%2] : memref - omp.yield + // CHECK: omp.wsloop { + // CHECK-NEXT: omp.simd + // CHECK-NEXT: omp.loop_nest + omp.wsloop { + omp.simd { + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + omp.yield + } + omp.terminator + } + omp.terminator } return @@ -659,7 +720,7 @@ func.func @omp_distribute(%chunk_size : i32, %data_var : memref, %arg0 : i3 // CHECK: omp.distribute omp.distribute { omp.simd { - omp.loop_nest (%iv2) : i32 = (%arg0) to (%arg0) step (%arg0) { + omp.loop_nest (%iv) : i32 = (%arg0) to (%arg0) step (%arg0) { omp.yield } } @@ -791,17 +852,19 @@ func.func @wsloop_reduction(%lb : index, %ub : index, %step : index) { %c1 = arith.constant 1 : i32 %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr // CHECK: reduction(@add_f32 %{{.+}} -> %[[PRV:.+]] : !llvm.ptr) - omp.wsloop reduction(@add_f32 %0 -> %prv : !llvm.ptr) - for (%iv) : index = (%lb) to (%ub) step (%step) { - // CHECK: %[[CST:.+]] = arith.constant 2.0{{.*}} : f32 - %cst = arith.constant 2.0 : f32 - // CHECK: %[[LPRV:.+]] = llvm.load %[[PRV]] : !llvm.ptr -> f32 - %lprv = llvm.load %prv : !llvm.ptr -> f32 - // CHECK: %[[RES:.+]] = llvm.fadd %[[LPRV]], %[[CST]] : f32 - %res = llvm.fadd %lprv, %cst: f32 - // CHECK: llvm.store %[[RES]], %[[PRV]] : f32, !llvm.ptr - llvm.store %res, %prv : f32, !llvm.ptr - omp.yield + omp.wsloop reduction(@add_f32 %0 -> %prv : !llvm.ptr) { + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + // CHECK: %[[CST:.+]] = arith.constant 2.0{{.*}} : f32 + %cst = arith.constant 2.0 : f32 + // CHECK: %[[LPRV:.+]] = llvm.load %[[PRV]] : !llvm.ptr -> f32 + %lprv = llvm.load %prv : !llvm.ptr -> f32 + // CHECK: %[[RES:.+]] = llvm.fadd %[[LPRV]], %[[CST]] : f32 + %res = llvm.fadd %lprv, %cst: f32 + // CHECK: llvm.store %[[RES]], %[[PRV]] : f32, !llvm.ptr + llvm.store %res, %prv : f32, !llvm.ptr + omp.yield + } + omp.terminator } return } @@ -828,14 +891,19 @@ func.func @parallel_wsloop_reduction(%lb : index, %ub : index, %step : index) { %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr // CHECK: omp.parallel reduction(@add_f32 %{{.*}} -> %{{.+}} : !llvm.ptr) { omp.parallel reduction(@add_f32 %0 -> %prv : !llvm.ptr) { - // CHECK: omp.wsloop for (%{{.+}}) : index = (%{{.+}}) to (%{{.+}}) step (%{{.+}}) - omp.wsloop for (%iv) : index = (%lb) to (%ub) step (%step) { - %1 = arith.constant 2.0 : f32 - %2 = llvm.load %prv : !llvm.ptr -> f32 - // CHECK: llvm.fadd %{{.+}}, %{{.+}} : f32 - llvm.fadd %1, %2 : f32 - // CHECK: omp.yield - omp.yield + // CHECK: omp.wsloop { + omp.wsloop { + // CHECK: omp.loop_nest (%{{.+}}) : index = (%{{.+}}) to (%{{.+}}) step (%{{.+}}) { + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + %1 = arith.constant 2.0 : f32 + %2 = 
llvm.load %prv : !llvm.ptr -> f32 + // CHECK: llvm.fadd %{{.+}}, %{{.+}} : f32 + llvm.fadd %1, %2 : f32 + // CHECK: omp.yield + omp.yield + } + // CHECK: omp.terminator + omp.terminator } // CHECK: omp.terminator omp.terminator @@ -959,16 +1027,18 @@ combiner { // CHECK-LABEL: func @wsloop_reduction2 func.func @wsloop_reduction2(%lb : index, %ub : index, %step : index) { %0 = memref.alloca() : memref<1xf32> - // CHECK: omp.wsloop reduction(@add2_f32 %{{.+}} -> %{{.+}} : memref<1xf32>) - omp.wsloop reduction(@add2_f32 %0 -> %prv : memref<1xf32>) - for (%iv) : index = (%lb) to (%ub) step (%step) { - %1 = arith.constant 2.0 : f32 - %2 = arith.constant 0 : index - %3 = memref.load %prv[%2] : memref<1xf32> - // CHECK: llvm.fadd - %4 = llvm.fadd %1, %3 : f32 - memref.store %4, %prv[%2] : memref<1xf32> - omp.yield + // CHECK: omp.wsloop reduction(@add2_f32 %{{.+}} -> %{{.+}} : memref<1xf32>) { + omp.wsloop reduction(@add2_f32 %0 -> %prv : memref<1xf32>) { + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + %1 = arith.constant 2.0 : f32 + %2 = arith.constant 0 : index + %3 = memref.load %prv[%2] : memref<1xf32> + // CHECK: llvm.fadd + %4 = llvm.fadd %1, %3 : f32 + memref.store %4, %prv[%2] : memref<1xf32> + omp.yield + } + omp.terminator } return } @@ -995,14 +1065,19 @@ func.func @parallel_wsloop_reduction2(%lb : index, %ub : index, %step : index) { %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr // CHECK: omp.parallel reduction(@add2_f32 %{{.*}} -> %{{.+}} : !llvm.ptr) { omp.parallel reduction(@add2_f32 %0 -> %prv : !llvm.ptr) { - // CHECK: omp.wsloop for (%{{.+}}) : index = (%{{.+}}) to (%{{.+}}) step (%{{.+}}) - omp.wsloop for (%iv) : index = (%lb) to (%ub) step (%step) { - %1 = arith.constant 2.0 : f32 - %2 = llvm.load %prv : !llvm.ptr -> f32 - // CHECK: llvm.fadd %{{.+}}, %{{.+}} : f32 - %3 = llvm.fadd %1, %2 : f32 - // CHECK: omp.yield - omp.yield + // CHECK: omp.wsloop { + omp.wsloop { + // CHECK: omp.loop_nest (%{{.+}}) : index = (%{{.+}}) to (%{{.+}}) step (%{{.+}}) { + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + %1 = arith.constant 2.0 : f32 + %2 = llvm.load %prv : !llvm.ptr -> f32 + // CHECK: llvm.fadd %{{.+}}, %{{.+}} : f32 + %3 = llvm.fadd %1, %2 : f32 + // CHECK: omp.yield + omp.yield + } + // CHECK: omp.terminator + omp.terminator } // CHECK: omp.terminator omp.terminator @@ -1076,36 +1151,44 @@ func.func @omp_ordered(%arg1 : i32, %arg2 : i32, %arg3 : i32, omp.terminator } - omp.wsloop ordered(0) - for (%0) : i32 = (%arg1) to (%arg2) step (%arg3) { - omp.ordered.region { - omp.terminator + omp.wsloop ordered(0) { + omp.loop_nest (%0) : i32 = (%arg1) to (%arg2) step (%arg3) { + // CHECK: omp.ordered.region + omp.ordered.region { + // CHECK: omp.terminator + omp.terminator + } + omp.yield } - omp.yield + omp.terminator } - omp.wsloop ordered(1) - for (%0) : i32 = (%arg1) to (%arg2) step (%arg3) { - // Only one DEPEND(SINK: vec) clause - // CHECK: omp.ordered depend_type(dependsink) depend_vec(%{{.*}} : i64) {num_loops_val = 1 : i64} - omp.ordered depend_type(dependsink) depend_vec(%vec0 : i64) {num_loops_val = 1 : i64} + omp.wsloop ordered(1) { + omp.loop_nest (%0) : i32 = (%arg1) to (%arg2) step (%arg3) { + // Only one DEPEND(SINK: vec) clause + // CHECK: omp.ordered depend_type(dependsink) depend_vec(%{{.*}} : i64) {num_loops_val = 1 : i64} + omp.ordered depend_type(dependsink) depend_vec(%vec0 : i64) {num_loops_val = 1 : i64} - // CHECK: omp.ordered depend_type(dependsource) depend_vec(%{{.*}} : i64) {num_loops_val = 1 : i64} - omp.ordered 
depend_type(dependsource) depend_vec(%vec0 : i64) {num_loops_val = 1 : i64} + // CHECK: omp.ordered depend_type(dependsource) depend_vec(%{{.*}} : i64) {num_loops_val = 1 : i64} + omp.ordered depend_type(dependsource) depend_vec(%vec0 : i64) {num_loops_val = 1 : i64} - omp.yield + omp.yield + } + omp.terminator } - omp.wsloop ordered(2) - for (%0) : i32 = (%arg1) to (%arg2) step (%arg3) { - // Multiple DEPEND(SINK: vec) clauses - // CHECK: omp.ordered depend_type(dependsink) depend_vec(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : i64, i64, i64, i64) {num_loops_val = 2 : i64} - omp.ordered depend_type(dependsink) depend_vec(%vec0, %vec1, %vec2, %vec3 : i64, i64, i64, i64) {num_loops_val = 2 : i64} + omp.wsloop ordered(2) { + omp.loop_nest (%0) : i32 = (%arg1) to (%arg2) step (%arg3) { + // Multiple DEPEND(SINK: vec) clauses + // CHECK: omp.ordered depend_type(dependsink) depend_vec(%{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : i64, i64, i64, i64) {num_loops_val = 2 : i64} + omp.ordered depend_type(dependsink) depend_vec(%vec0, %vec1, %vec2, %vec3 : i64, i64, i64, i64) {num_loops_val = 2 : i64} - // CHECK: omp.ordered depend_type(dependsource) depend_vec(%{{.*}}, %{{.*}} : i64, i64) {num_loops_val = 2 : i64} - omp.ordered depend_type(dependsource) depend_vec(%vec0, %vec1 : i64, i64) {num_loops_val = 2 : i64} + // CHECK: omp.ordered depend_type(dependsource) depend_vec(%{{.*}}, %{{.*}} : i64, i64) {num_loops_val = 2 : i64} + omp.ordered depend_type(dependsource) depend_vec(%vec0, %vec1 : i64, i64) {num_loops_val = 2 : i64} - omp.yield + omp.yield + } + omp.terminator } return @@ -1956,11 +2039,13 @@ func.func @omp_cancel_parallel(%if_cond : i1) -> () { } func.func @omp_cancel_wsloop(%lb : index, %ub : index, %step : index) { - omp.wsloop - for (%iv) : index = (%lb) to (%ub) step (%step) { - // CHECK: omp.cancel cancellation_construct_type(loop) - omp.cancel cancellation_construct_type(loop) - // CHECK: omp.terminator + omp.wsloop { + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + // CHECK: omp.cancel cancellation_construct_type(loop) + omp.cancel cancellation_construct_type(loop) + // CHECK: omp.yield + omp.yield + } omp.terminator } return @@ -1991,13 +2076,15 @@ func.func @omp_cancellationpoint_parallel() -> () { } func.func @omp_cancellationpoint_wsloop(%lb : index, %ub : index, %step : index) { - omp.wsloop - for (%iv) : index = (%lb) to (%ub) step (%step) { - // CHECK: omp.cancellation_point cancellation_construct_type(loop) - omp.cancellation_point cancellation_construct_type(loop) - // CHECK: omp.cancel cancellation_construct_type(loop) - omp.cancel cancellation_construct_type(loop) - // CHECK: omp.terminator + omp.wsloop { + omp.loop_nest (%iv) : index = (%lb) to (%ub) step (%step) { + // CHECK: omp.cancellation_point cancellation_construct_type(loop) + omp.cancellation_point cancellation_construct_type(loop) + // CHECK: omp.cancel cancellation_construct_type(loop) + omp.cancel cancellation_construct_type(loop) + // CHECK: omp.yield + omp.yield + } omp.terminator } return diff --git a/mlir/test/Target/LLVMIR/omptarget-parallel-wsloop.mlir b/mlir/test/Target/LLVMIR/omptarget-parallel-wsloop.mlir index b0fe642238f14..360b3b0c0e60c 100644 --- a/mlir/test/Target/LLVMIR/omptarget-parallel-wsloop.mlir +++ b/mlir/test/Target/LLVMIR/omptarget-parallel-wsloop.mlir @@ -12,10 +12,13 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<"dlti.alloca_memo %loop_ub = llvm.mlir.constant(9 : i32) : i32 %loop_lb = llvm.mlir.constant(0 : i32) : i32 %loop_step = llvm.mlir.constant(1 : i32) : 
i32 - omp.wsloop for (%loop_cnt) : i32 = (%loop_lb) to (%loop_ub) inclusive step (%loop_step) { - %gep = llvm.getelementptr %arg0[0, %loop_cnt] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.array<10 x i32> - llvm.store %loop_cnt, %gep : i32, !llvm.ptr - omp.yield + omp.wsloop { + omp.loop_nest (%loop_cnt) : i32 = (%loop_lb) to (%loop_ub) inclusive step (%loop_step) { + %gep = llvm.getelementptr %arg0[0, %loop_cnt] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.array<10 x i32> + llvm.store %loop_cnt, %gep : i32, !llvm.ptr + omp.yield + } + omp.terminator } omp.terminator } diff --git a/mlir/test/Target/LLVMIR/omptarget-wsloop-collapsed.mlir b/mlir/test/Target/LLVMIR/omptarget-wsloop-collapsed.mlir index 0d77423abcb4f..13d34b7e58f77 100644 --- a/mlir/test/Target/LLVMIR/omptarget-wsloop-collapsed.mlir +++ b/mlir/test/Target/LLVMIR/omptarget-wsloop-collapsed.mlir @@ -8,13 +8,16 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<"dlti.alloca_memo %loop_ub = llvm.mlir.constant(99 : i32) : i32 %loop_lb = llvm.mlir.constant(0 : i32) : i32 %loop_step = llvm.mlir.constant(1 : index) : i32 - omp.wsloop for (%arg1, %arg2) : i32 = (%loop_lb, %loop_lb) to (%loop_ub, %loop_ub) inclusive step (%loop_step, %loop_step) { - %1 = llvm.add %arg1, %arg2 : i32 - %2 = llvm.mul %arg2, %loop_ub overflow : i32 - %3 = llvm.add %arg1, %2 :i32 - %4 = llvm.getelementptr %arg0[%3] : (!llvm.ptr, i32) -> !llvm.ptr, i32 - llvm.store %1, %4 : i32, !llvm.ptr - omp.yield + omp.wsloop { + omp.loop_nest (%arg1, %arg2) : i32 = (%loop_lb, %loop_lb) to (%loop_ub, %loop_ub) inclusive step (%loop_step, %loop_step) { + %1 = llvm.add %arg1, %arg2 : i32 + %2 = llvm.mul %arg2, %loop_ub overflow : i32 + %3 = llvm.add %arg1, %2 :i32 + %4 = llvm.getelementptr %arg0[%3] : (!llvm.ptr, i32) -> !llvm.ptr, i32 + llvm.store %1, %4 : i32, !llvm.ptr + omp.yield + } + omp.terminator } llvm.return } diff --git a/mlir/test/Target/LLVMIR/omptarget-wsloop.mlir b/mlir/test/Target/LLVMIR/omptarget-wsloop.mlir index 0f3f503dfa537..ee851eaf71ac0 100644 --- a/mlir/test/Target/LLVMIR/omptarget-wsloop.mlir +++ b/mlir/test/Target/LLVMIR/omptarget-wsloop.mlir @@ -8,10 +8,13 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<"dlti.alloca_memo %loop_ub = llvm.mlir.constant(9 : i32) : i32 %loop_lb = llvm.mlir.constant(0 : i32) : i32 %loop_step = llvm.mlir.constant(1 : i32) : i32 - omp.wsloop for (%loop_cnt) : i32 = (%loop_lb) to (%loop_ub) inclusive step (%loop_step) { - %gep = llvm.getelementptr %arg0[0, %loop_cnt] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.array<10 x i32> - llvm.store %loop_cnt, %gep : i32, !llvm.ptr - omp.yield + omp.wsloop { + omp.loop_nest (%loop_cnt) : i32 = (%loop_lb) to (%loop_ub) inclusive step (%loop_step) { + %gep = llvm.getelementptr %arg0[0, %loop_cnt] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.array<10 x i32> + llvm.store %loop_cnt, %gep : i32, !llvm.ptr + omp.yield + } + omp.terminator } llvm.return } @@ -20,8 +23,11 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<"dlti.alloca_memo %loop_ub = llvm.mlir.constant(9 : i32) : i32 %loop_lb = llvm.mlir.constant(0 : i32) : i32 %loop_step = llvm.mlir.constant(1 : i32) : i32 - omp.wsloop for (%loop_cnt) : i32 = (%loop_lb) to (%loop_ub) inclusive step (%loop_step) { - omp.yield + omp.wsloop { + omp.loop_nest (%loop_cnt) : i32 = (%loop_lb) to (%loop_ub) inclusive step (%loop_step) { + omp.yield + } + omp.terminator } llvm.return } diff --git a/mlir/test/Target/LLVMIR/openmp-data-target-device.mlir b/mlir/test/Target/LLVMIR/openmp-data-target-device.mlir index 
d41429a6de066..4ea9df369af66 100644 --- a/mlir/test/Target/LLVMIR/openmp-data-target-device.mlir +++ b/mlir/test/Target/LLVMIR/openmp-data-target-device.mlir @@ -31,20 +31,23 @@ module attributes { } { %18 = llvm.mlir.constant(1 : i64) : i64 %19 = llvm.alloca %18 x i32 {pinned} : (i64) -> !llvm.ptr<5> %20 = llvm.addrspacecast %19 : !llvm.ptr<5> to !llvm.ptr - omp.wsloop for (%arg2) : i32 = (%16) to (%15) inclusive step (%16) { - llvm.store %arg2, %20 : i32, !llvm.ptr - %21 = llvm.load %20 : !llvm.ptr -> i32 - %22 = llvm.sext %21 : i32 to i64 - %23 = llvm.mlir.constant(1 : i64) : i64 - %24 = llvm.mlir.constant(0 : i64) : i64 - %25 = llvm.sub %22, %23 overflow : i64 - %26 = llvm.mul %25, %23 overflow : i64 - %27 = llvm.mul %26, %23 overflow : i64 - %28 = llvm.add %27, %24 overflow : i64 - %29 = llvm.mul %23, %17 overflow : i64 - %30 = llvm.getelementptr %arg0[%28] : (!llvm.ptr, i64) -> !llvm.ptr, i32 - llvm.store %21, %30 : i32, !llvm.ptr - omp.yield + omp.wsloop { + omp.loop_nest (%arg2) : i32 = (%16) to (%15) inclusive step (%16) { + llvm.store %arg2, %20 : i32, !llvm.ptr + %21 = llvm.load %20 : !llvm.ptr -> i32 + %22 = llvm.sext %21 : i32 to i64 + %23 = llvm.mlir.constant(1 : i64) : i64 + %24 = llvm.mlir.constant(0 : i64) : i64 + %25 = llvm.sub %22, %23 overflow : i64 + %26 = llvm.mul %25, %23 overflow : i64 + %27 = llvm.mul %26, %23 overflow : i64 + %28 = llvm.add %27, %24 overflow : i64 + %29 = llvm.mul %23, %17 overflow : i64 + %30 = llvm.getelementptr %arg0[%28] : (!llvm.ptr, i64) -> !llvm.ptr, i32 + llvm.store %21, %30 : i32, !llvm.ptr + omp.yield + } + omp.terminator } omp.terminator } diff --git a/mlir/test/Target/LLVMIR/openmp-llvm.mlir b/mlir/test/Target/LLVMIR/openmp-llvm.mlir index d1390022c1dc4..ad40ca26bec9f 100644 --- a/mlir/test/Target/LLVMIR/openmp-llvm.mlir +++ b/mlir/test/Target/LLVMIR/openmp-llvm.mlir @@ -320,18 +320,20 @@ llvm.func @wsloop_simple(%arg0: !llvm.ptr) { %1 = llvm.mlir.constant(10 : index) : i64 %2 = llvm.mlir.constant(1 : index) : i64 omp.parallel { - "omp.wsloop"(%1, %0, %2) ({ - ^bb0(%arg1: i64): - // The form of the emitted IR is controlled by OpenMPIRBuilder and - // tested there. Just check that the right functions are called. - // CHECK: call i32 @__kmpc_global_thread_num - // CHECK: call void @__kmpc_for_static_init_{{.*}}(ptr @[[$loc_struct]], - %3 = llvm.mlir.constant(2.000000e+00 : f32) : f32 - %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr, i64) -> !llvm.ptr, f32 - llvm.store %3, %4 : f32, !llvm.ptr - omp.yield + "omp.wsloop"() ({ + omp.loop_nest (%arg1) : i64 = (%1) to (%0) step (%2) { + // The form of the emitted IR is controlled by OpenMPIRBuilder and + // tested there. Just check that the right functions are called. 
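[Editor's aside, not part of the patch: with the bounds removed from `omp.wsloop`, the generic-syntax translation tests in this file pass no operands to the wrapper at all; the bounds reappear as operands of the `omp.loop_nest` nested inside its region. The translated shape, sketched with illustrative `i64` values:

```
"omp.wsloop"() ({
  omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
    omp.yield
  }
  omp.terminator
}) : () -> ()
```
The FileCheck lines here only pin down the runtime entry points (`__kmpc_global_thread_num`, `__kmpc_for_static_init_*`, `__kmpc_for_static_fini`); the exact loop structure is OpenMPIRBuilder's responsibility and is tested there.]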
+ // CHECK: call i32 @__kmpc_global_thread_num + // CHECK: call void @__kmpc_for_static_init_{{.*}}(ptr @[[$loc_struct]], + %3 = llvm.mlir.constant(2.000000e+00 : f32) : f32 + %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr, i64) -> !llvm.ptr, f32 + llvm.store %3, %4 : f32, !llvm.ptr + omp.yield + } + omp.terminator // CHECK: call void @__kmpc_for_static_fini(ptr @[[$loc_struct]], - }) {operandSegmentSizes = array} : (i64, i64, i64) -> () + }) : () -> () omp.terminator } llvm.return @@ -345,13 +347,15 @@ llvm.func @wsloop_inclusive_1(%arg0: !llvm.ptr) { %1 = llvm.mlir.constant(10 : index) : i64 %2 = llvm.mlir.constant(1 : index) : i64 // CHECK: store i64 31, ptr %{{.*}}upperbound - "omp.wsloop"(%1, %0, %2) ({ - ^bb0(%arg1: i64): - %3 = llvm.mlir.constant(2.000000e+00 : f32) : f32 - %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr, i64) -> !llvm.ptr, f32 - llvm.store %3, %4 : f32, !llvm.ptr - omp.yield - }) {operandSegmentSizes = array} : (i64, i64, i64) -> () + "omp.wsloop"() ({ + omp.loop_nest (%arg1) : i64 = (%1) to (%0) step (%2) { + %3 = llvm.mlir.constant(2.000000e+00 : f32) : f32 + %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr, i64) -> !llvm.ptr, f32 + llvm.store %3, %4 : f32, !llvm.ptr + omp.yield + } + omp.terminator + }) : () -> () llvm.return } @@ -363,13 +367,15 @@ llvm.func @wsloop_inclusive_2(%arg0: !llvm.ptr) { %1 = llvm.mlir.constant(10 : index) : i64 %2 = llvm.mlir.constant(1 : index) : i64 // CHECK: store i64 32, ptr %{{.*}}upperbound - "omp.wsloop"(%1, %0, %2) ({ - ^bb0(%arg1: i64): - %3 = llvm.mlir.constant(2.000000e+00 : f32) : f32 - %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr, i64) -> !llvm.ptr, f32 - llvm.store %3, %4 : f32, !llvm.ptr - omp.yield - }) {inclusive, operandSegmentSizes = array} : (i64, i64, i64) -> () + "omp.wsloop"() ({ + omp.loop_nest (%arg1) : i64 = (%1) to (%0) inclusive step (%2) { + %3 = llvm.mlir.constant(2.000000e+00 : f32) : f32 + %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr, i64) -> !llvm.ptr, f32 + llvm.store %3, %4 : f32, !llvm.ptr + omp.yield + } + omp.terminator + }) : () -> () llvm.return } @@ -379,14 +385,16 @@ llvm.func @body(i32) // CHECK-LABEL: @test_omp_wsloop_static_defchunk llvm.func @test_omp_wsloop_static_defchunk(%lb : i32, %ub : i32, %step : i32) -> () { - omp.wsloop schedule(static) - for (%iv) : i32 = (%lb) to (%ub) step (%step) { - // CHECK: call void @__kmpc_for_static_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 34, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, i32 1, i32 0) - // CHECK: call void @__kmpc_for_static_fini - llvm.call @body(%iv) : (i32) -> () - omp.yield - } - llvm.return + omp.wsloop schedule(static) { + omp.loop_nest (%iv) : i32 = (%lb) to (%ub) step (%step) { + // CHECK: call void @__kmpc_for_static_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 34, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, i32 1, i32 0) + // CHECK: call void @__kmpc_for_static_fini + llvm.call @body(%iv) : (i32) -> () + omp.yield + } + omp.terminator + } + llvm.return } // ----- @@ -395,15 +403,17 @@ llvm.func @body(i32) // CHECK-LABEL: @test_omp_wsloop_static_1 llvm.func @test_omp_wsloop_static_1(%lb : i32, %ub : i32, %step : i32) -> () { - %static_chunk_size = llvm.mlir.constant(1 : i32) : i32 - omp.wsloop schedule(static = %static_chunk_size : i32) - for (%iv) : i32 = (%lb) to (%ub) step (%step) { - // CHECK: call void @__kmpc_for_static_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 33, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, i32 1, i32 1) - // CHECK: call void @__kmpc_for_static_fini - llvm.call @body(%iv) : (i32) 
-> () - omp.yield - } - llvm.return + %static_chunk_size = llvm.mlir.constant(1 : i32) : i32 + omp.wsloop schedule(static = %static_chunk_size : i32) { + omp.loop_nest (%iv) : i32 = (%lb) to (%ub) step (%step) { + // CHECK: call void @__kmpc_for_static_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 33, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, i32 1, i32 1) + // CHECK: call void @__kmpc_for_static_fini + llvm.call @body(%iv) : (i32) -> () + omp.yield + } + omp.terminator + } + llvm.return } // ----- @@ -412,15 +422,17 @@ llvm.func @body(i32) // CHECK-LABEL: @test_omp_wsloop_static_2 llvm.func @test_omp_wsloop_static_2(%lb : i32, %ub : i32, %step : i32) -> () { - %static_chunk_size = llvm.mlir.constant(2 : i32) : i32 - omp.wsloop schedule(static = %static_chunk_size : i32) - for (%iv) : i32 = (%lb) to (%ub) step (%step) { - // CHECK: call void @__kmpc_for_static_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 33, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, i32 1, i32 2) - // CHECK: call void @__kmpc_for_static_fini - llvm.call @body(%iv) : (i32) -> () - omp.yield - } - llvm.return + %static_chunk_size = llvm.mlir.constant(2 : i32) : i32 + omp.wsloop schedule(static = %static_chunk_size : i32) { + omp.loop_nest (%iv) : i32 = (%lb) to (%ub) step (%step) { + // CHECK: call void @__kmpc_for_static_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 33, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, ptr %{{.*}}, i32 1, i32 2) + // CHECK: call void @__kmpc_for_static_fini + llvm.call @body(%iv) : (i32) -> () + omp.yield + } + omp.terminator + } + llvm.return } // ----- @@ -428,16 +440,18 @@ llvm.func @test_omp_wsloop_static_2(%lb : i32, %ub : i32, %step : i32) -> () { llvm.func @body(i64) llvm.func @test_omp_wsloop_dynamic(%lb : i64, %ub : i64, %step : i64) -> () { - omp.wsloop schedule(dynamic) - for (%iv) : i64 = (%lb) to (%ub) step (%step) { - // CHECK: call void @__kmpc_dispatch_init_8u - // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u - // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0 - // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}} - llvm.call @body(%iv) : (i64) -> () - omp.yield - } - llvm.return + omp.wsloop schedule(dynamic) { + omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) { + // CHECK: call void @__kmpc_dispatch_init_8u + // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u + // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0 + // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}} + llvm.call @body(%iv) : (i64) -> () + omp.yield + } + omp.terminator + } + llvm.return } // ----- @@ -445,17 +459,19 @@ llvm.func @test_omp_wsloop_dynamic(%lb : i64, %ub : i64, %step : i64) -> () { llvm.func @body(i64) llvm.func @test_omp_wsloop_dynamic_chunk_const(%lb : i64, %ub : i64, %step : i64) -> () { - %chunk_size_const = llvm.mlir.constant(2 : i16) : i16 - omp.wsloop schedule(dynamic = %chunk_size_const : i16) - for (%iv) : i64 = (%lb) to (%ub) step (%step) { - // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741859, i64 {{.*}}, i64 %{{.*}}, i64 {{.*}}, i64 2) - // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u - // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0 - // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}} - llvm.call @body(%iv) : (i64) -> () - omp.yield - } - llvm.return + %chunk_size_const = llvm.mlir.constant(2 : i16) : i16 + omp.wsloop schedule(dynamic = %chunk_size_const : i16) { + omp.loop_nest (%iv) : i64 = (%lb) to (%ub) 
step (%step) { + // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741859, i64 {{.*}}, i64 %{{.*}}, i64 {{.*}}, i64 2) + // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u + // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0 + // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}} + llvm.call @body(%iv) : (i64) -> () + omp.yield + } + omp.terminator + } + llvm.return } // ----- @@ -463,20 +479,22 @@ llvm.func @test_omp_wsloop_dynamic_chunk_const(%lb : i64, %ub : i64, %step : i64 llvm.func @body(i32) llvm.func @test_omp_wsloop_dynamic_chunk_var(%lb : i32, %ub : i32, %step : i32) -> () { - %1 = llvm.mlir.constant(1 : i64) : i64 - %chunk_size_alloca = llvm.alloca %1 x i16 {bindc_name = "chunk_size", in_type = i16, uniq_name = "_QFsub1Echunk_size"} : (i64) -> !llvm.ptr - %chunk_size_var = llvm.load %chunk_size_alloca : !llvm.ptr -> i16 - omp.wsloop schedule(dynamic = %chunk_size_var : i16) - for (%iv) : i32 = (%lb) to (%ub) step (%step) { - // CHECK: %[[CHUNK_SIZE:.*]] = sext i16 %{{.*}} to i32 - // CHECK: call void @__kmpc_dispatch_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741859, i32 {{.*}}, i32 %{{.*}}, i32 {{.*}}, i32 %[[CHUNK_SIZE]]) - // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_4u - // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0 - // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}} - llvm.call @body(%iv) : (i32) -> () - omp.yield - } - llvm.return + %1 = llvm.mlir.constant(1 : i64) : i64 + %chunk_size_alloca = llvm.alloca %1 x i16 {bindc_name = "chunk_size", in_type = i16, uniq_name = "_QFsub1Echunk_size"} : (i64) -> !llvm.ptr + %chunk_size_var = llvm.load %chunk_size_alloca : !llvm.ptr -> i16 + omp.wsloop schedule(dynamic = %chunk_size_var : i16) { + omp.loop_nest (%iv) : i32 = (%lb) to (%ub) step (%step) { + // CHECK: %[[CHUNK_SIZE:.*]] = sext i16 %{{.*}} to i32 + // CHECK: call void @__kmpc_dispatch_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741859, i32 {{.*}}, i32 %{{.*}}, i32 {{.*}}, i32 %[[CHUNK_SIZE]]) + // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_4u + // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0 + // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}} + llvm.call @body(%iv) : (i32) -> () + omp.yield + } + omp.terminator + } + llvm.return } // ----- @@ -484,20 +502,22 @@ llvm.func @test_omp_wsloop_dynamic_chunk_var(%lb : i32, %ub : i32, %step : i32) llvm.func @body(i32) llvm.func @test_omp_wsloop_dynamic_chunk_var2(%lb : i32, %ub : i32, %step : i32) -> () { - %1 = llvm.mlir.constant(1 : i64) : i64 - %chunk_size_alloca = llvm.alloca %1 x i64 {bindc_name = "chunk_size", in_type = i64, uniq_name = "_QFsub1Echunk_size"} : (i64) -> !llvm.ptr - %chunk_size_var = llvm.load %chunk_size_alloca : !llvm.ptr -> i64 - omp.wsloop schedule(dynamic = %chunk_size_var : i64) - for (%iv) : i32 = (%lb) to (%ub) step (%step) { - // CHECK: %[[CHUNK_SIZE:.*]] = trunc i64 %{{.*}} to i32 - // CHECK: call void @__kmpc_dispatch_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741859, i32 {{.*}}, i32 %{{.*}}, i32 {{.*}}, i32 %[[CHUNK_SIZE]]) - // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_4u - // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0 - // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}} - llvm.call @body(%iv) : (i32) -> () - omp.yield - } - llvm.return + %1 = llvm.mlir.constant(1 : i64) : i64 + %chunk_size_alloca = llvm.alloca %1 x i64 {bindc_name = "chunk_size", in_type = i64, 
uniq_name = "_QFsub1Echunk_size"} : (i64) -> !llvm.ptr + %chunk_size_var = llvm.load %chunk_size_alloca : !llvm.ptr -> i64 + omp.wsloop schedule(dynamic = %chunk_size_var : i64) { + omp.loop_nest (%iv) : i32 = (%lb) to (%ub) step (%step) { + // CHECK: %[[CHUNK_SIZE:.*]] = trunc i64 %{{.*}} to i32 + // CHECK: call void @__kmpc_dispatch_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741859, i32 {{.*}}, i32 %{{.*}}, i32 {{.*}}, i32 %[[CHUNK_SIZE]]) + // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_4u + // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0 + // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}} + llvm.call @body(%iv) : (i32) -> () + omp.yield + } + omp.terminator + } + llvm.return } // ----- @@ -505,16 +525,18 @@ llvm.func @test_omp_wsloop_dynamic_chunk_var2(%lb : i32, %ub : i32, %step : i32) llvm.func @body(i32) llvm.func @test_omp_wsloop_dynamic_chunk_var3(%lb : i32, %ub : i32, %step : i32, %chunk_size : i32) -> () { - omp.wsloop schedule(dynamic = %chunk_size : i32) - for (%iv) : i32 = (%lb) to (%ub) step (%step) { - // CHECK: call void @__kmpc_dispatch_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741859, i32 {{.*}}, i32 %{{.*}}, i32 {{.*}}, i32 %{{.*}}) - // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_4u - // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0 - // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}} - llvm.call @body(%iv) : (i32) -> () - omp.yield - } - llvm.return + omp.wsloop schedule(dynamic = %chunk_size : i32) { + omp.loop_nest (%iv) : i32 = (%lb) to (%ub) step (%step) { + // CHECK: call void @__kmpc_dispatch_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741859, i32 {{.*}}, i32 %{{.*}}, i32 {{.*}}, i32 %{{.*}}) + // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_4u + // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0 + // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}} + llvm.call @body(%iv) : (i32) -> () + omp.yield + } + omp.terminator + } + llvm.return } // ----- @@ -522,16 +544,18 @@ llvm.func @test_omp_wsloop_dynamic_chunk_var3(%lb : i32, %ub : i32, %step : i32, llvm.func @body(i64) llvm.func @test_omp_wsloop_auto(%lb : i64, %ub : i64, %step : i64) -> () { - omp.wsloop schedule(auto) - for (%iv) : i64 = (%lb) to (%ub) step (%step) { - // CHECK: call void @__kmpc_dispatch_init_8u - // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u - // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0 - // CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}} - llvm.call @body(%iv) : (i64) -> () - omp.yield - } - llvm.return + omp.wsloop schedule(auto) { + omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) { + // CHECK: call void @__kmpc_dispatch_init_8u + // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u + // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0 + // CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}} + llvm.call @body(%iv) : (i64) -> () + omp.yield + } + omp.terminator + } + llvm.return } // ----- @@ -539,14 +563,16 @@ llvm.func @test_omp_wsloop_auto(%lb : i64, %ub : i64, %step : i64) -> () { llvm.func @body(i64) llvm.func @test_omp_wsloop_runtime(%lb : i64, %ub : i64, %step : i64) -> () { - omp.wsloop schedule(runtime) - for (%iv) : i64 = (%lb) to (%ub) step (%step) { - // CHECK: call void @__kmpc_dispatch_init_8u - // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u - // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0 - // CHECK br 
i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}} - llvm.call @body(%iv) : (i64) -> () - omp.yield + omp.wsloop schedule(runtime) { + omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) { + // CHECK: call void @__kmpc_dispatch_init_8u + // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u + // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0 + // CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}} + llvm.call @body(%iv) : (i64) -> () + omp.yield + } + omp.terminator } llvm.return } @@ -556,14 +582,16 @@ llvm.func @test_omp_wsloop_runtime(%lb : i64, %ub : i64, %step : i64) -> () { llvm.func @body(i64) llvm.func @test_omp_wsloop_guided(%lb : i64, %ub : i64, %step : i64) -> () { - omp.wsloop schedule(guided) - for (%iv) : i64 = (%lb) to (%ub) step (%step) { - // CHECK: call void @__kmpc_dispatch_init_8u - // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u - // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0 - // CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}} - llvm.call @body(%iv) : (i64) -> () - omp.yield + omp.wsloop schedule(guided) { + omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) { + // CHECK: call void @__kmpc_dispatch_init_8u + // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u + // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0 + // CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}} + llvm.call @body(%iv) : (i64) -> () + omp.yield + } + omp.terminator } llvm.return } @@ -573,14 +601,16 @@ llvm.func @test_omp_wsloop_guided(%lb : i64, %ub : i64, %step : i64) -> () { llvm.func @body(i64) llvm.func @test_omp_wsloop_dynamic_nonmonotonic(%lb : i64, %ub : i64, %step : i64) -> () { - omp.wsloop schedule(dynamic, nonmonotonic) - for (%iv) : i64 = (%lb) to (%ub) step (%step) { - // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741859 - // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u - // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0 - // CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}} - llvm.call @body(%iv) : (i64) -> () - omp.yield + omp.wsloop schedule(dynamic, nonmonotonic) { + omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) { + // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741859 + // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u + // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0 + // CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}} + llvm.call @body(%iv) : (i64) -> () + omp.yield + } + omp.terminator } llvm.return } @@ -590,14 +620,16 @@ llvm.func @test_omp_wsloop_dynamic_nonmonotonic(%lb : i64, %ub : i64, %step : i6 llvm.func @body(i64) llvm.func @test_omp_wsloop_dynamic_monotonic(%lb : i64, %ub : i64, %step : i64) -> () { - omp.wsloop schedule(dynamic, monotonic) - for (%iv) : i64 = (%lb) to (%ub) step (%step) { - // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 536870947 - // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u - // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0 - // CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}} - llvm.call @body(%iv) : (i64) -> () - omp.yield + omp.wsloop schedule(dynamic, monotonic) { + omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) { + // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 536870947 + // CHECK: 
%[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
+      // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
+      // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+      llvm.call @body(%iv) : (i64) -> ()
+      omp.yield
+    }
+    omp.terminator
   }
   llvm.return
 }
@@ -607,14 +639,16 @@ llvm.func @test_omp_wsloop_dynamic_monotonic(%lb : i64, %ub : i64, %step : i64)
 llvm.func @body(i64)
 
 llvm.func @test_omp_wsloop_runtime_simd(%lb : i64, %ub : i64, %step : i64) -> () {
-  omp.wsloop schedule(runtime, simd)
-  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-    // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741871
-    // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
-    // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-    // CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
-    llvm.call @body(%iv) : (i64) -> ()
-    omp.yield
+  omp.wsloop schedule(runtime, simd) {
+    omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
+      // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741871
+      // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
+      // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
+      // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+      llvm.call @body(%iv) : (i64) -> ()
+      omp.yield
+    }
+    omp.terminator
   }
   llvm.return
 }
@@ -624,14 +658,16 @@ llvm.func @test_omp_wsloop_runtime_simd(%lb : i64, %ub : i64, %step : i64) -> ()
 llvm.func @body(i64)
 
 llvm.func @test_omp_wsloop_guided_simd(%lb : i64, %ub : i64, %step : i64) -> () {
-  omp.wsloop schedule(guided, simd)
-  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-    // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741870
-    // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
-    // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-    // CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
-    llvm.call @body(%iv) : (i64) -> ()
-    omp.yield
+  omp.wsloop schedule(guided, simd) {
+    omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
+      // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741870
+      // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
+      // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
+      // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+      llvm.call @body(%iv) : (i64) -> ()
+      omp.yield
+    }
+    omp.terminator
  }
   llvm.return
 }
@@ -793,17 +829,19 @@ llvm.func @simd_if(%arg0: !llvm.ptr {fir.bindc_name = "n"}, %arg1: !llvm.ptr {fi
 llvm.func @body(i64)
 
 llvm.func @test_omp_wsloop_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
-  omp.wsloop ordered(0)
-  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-    // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 66, i64 1, i64 %{{.*}}, i64 1, i64 1)
-    // CHECK: call void @__kmpc_dispatch_fini_8u
-    // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
-    // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-    // CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
-    llvm.call @body(%iv) : (i64) -> ()
-    omp.yield
-  }
-  llvm.return
+  omp.wsloop ordered(0) {
+    omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
+      // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 66, i64 1, i64 %{{.*}}, i64 1, i64 1)
+      // CHECK: call void @__kmpc_dispatch_fini_8u
+      // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
+      // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
+      // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+      llvm.call @body(%iv) : (i64) -> ()
+      omp.yield
+    }
+    omp.terminator
+  }
+  llvm.return
 }
 
 // -----
@@ -811,17 +849,19 @@ llvm.func @test_omp_wsloop_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
 llvm.func @body(i64)
 
 llvm.func @test_omp_wsloop_static_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
-  omp.wsloop schedule(static) ordered(0)
-  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-    // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 66, i64 1, i64 %{{.*}}, i64 1, i64 1)
-    // CHECK: call void @__kmpc_dispatch_fini_8u
-    // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
-    // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-    // CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
-    llvm.call @body(%iv) : (i64) -> ()
-    omp.yield
-  }
-  llvm.return
+  omp.wsloop schedule(static) ordered(0) {
+    omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
+      // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 66, i64 1, i64 %{{.*}}, i64 1, i64 1)
+      // CHECK: call void @__kmpc_dispatch_fini_8u
+      // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
+      // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
+      // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+      llvm.call @body(%iv) : (i64) -> ()
+      omp.yield
+    }
+    omp.terminator
+  }
+  llvm.return
 }
 
 // -----
@@ -829,18 +869,20 @@ llvm.func @test_omp_wsloop_static_ordered(%lb : i64, %ub : i64, %step : i64) ->
 llvm.func @body(i32)
 
 llvm.func @test_omp_wsloop_static_chunk_ordered(%lb : i32, %ub : i32, %step : i32) -> () {
-  %static_chunk_size = llvm.mlir.constant(1 : i32) : i32
-  omp.wsloop schedule(static = %static_chunk_size : i32) ordered(0)
-  for (%iv) : i32 = (%lb) to (%ub) step (%step) {
-    // CHECK: call void @__kmpc_dispatch_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 65, i32 1, i32 %{{.*}}, i32 1, i32 1)
-    // CHECK: call void @__kmpc_dispatch_fini_4u
-    // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_4u
-    // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-    // CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
-    llvm.call @body(%iv) : (i32) -> ()
-    omp.yield
-  }
-  llvm.return
+  %static_chunk_size = llvm.mlir.constant(1 : i32) : i32
+  omp.wsloop schedule(static = %static_chunk_size : i32) ordered(0) {
+    omp.loop_nest (%iv) : i32 = (%lb) to (%ub) step (%step) {
+      // CHECK: call void @__kmpc_dispatch_init_4u(ptr @{{.*}}, i32 %{{.*}}, i32 65, i32 1, i32 %{{.*}}, i32 1, i32 1)
+      // CHECK: call void @__kmpc_dispatch_fini_4u
+      // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_4u
+      // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
+      // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+      llvm.call @body(%iv) : (i32) -> ()
+      omp.yield
+    }
+    omp.terminator
+  }
+  llvm.return
 }
 
 // -----
@@ -848,17 +890,19 @@ llvm.func @test_omp_wsloop_static_chunk_ordered(%lb : i32, %ub : i32, %step : i3
 llvm.func @body(i64)
 
 llvm.func @test_omp_wsloop_dynamic_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
-  omp.wsloop schedule(dynamic) ordered(0)
-  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-    // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 67, i64 1, i64 %{{.*}}, i64 1, i64 1)
-    // CHECK: call void @__kmpc_dispatch_fini_8u
-    // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
-    // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-    // CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
-    llvm.call @body(%iv) : (i64) -> ()
-    omp.yield
-  }
-  llvm.return
+  omp.wsloop schedule(dynamic) ordered(0) {
+    omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
+      // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 67, i64 1, i64 %{{.*}}, i64 1, i64 1)
+      // CHECK: call void @__kmpc_dispatch_fini_8u
+      // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
+      // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
+      // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+      llvm.call @body(%iv) : (i64) -> ()
+      omp.yield
+    }
+    omp.terminator
+  }
+  llvm.return
 }
 
 // -----
@@ -866,17 +910,19 @@ llvm.func @test_omp_wsloop_dynamic_ordered(%lb : i64, %ub : i64, %step : i64) ->
 llvm.func @body(i64)
 
 llvm.func @test_omp_wsloop_auto_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
-  omp.wsloop schedule(auto) ordered(0)
-  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-    // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 70, i64 1, i64 %{{.*}}, i64 1, i64 1)
-    // CHECK: call void @__kmpc_dispatch_fini_8u
-    // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
-    // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-    // CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
-    llvm.call @body(%iv) : (i64) -> ()
-    omp.yield
-  }
-  llvm.return
+  omp.wsloop schedule(auto) ordered(0) {
+    omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
+      // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 70, i64 1, i64 %{{.*}}, i64 1, i64 1)
+      // CHECK: call void @__kmpc_dispatch_fini_8u
+      // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
+      // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
+      // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+      llvm.call @body(%iv) : (i64) -> ()
+      omp.yield
+    }
+    omp.terminator
+  }
+  llvm.return
 }
 
 // -----
@@ -884,17 +930,19 @@ llvm.func @test_omp_wsloop_auto_ordered(%lb : i64, %ub : i64, %step : i64) -> ()
 llvm.func @body(i64)
 
 llvm.func @test_omp_wsloop_runtime_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
-  omp.wsloop schedule(runtime) ordered(0)
-  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-    // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 69, i64 1, i64 %{{.*}}, i64 1, i64 1)
-    // CHECK: call void @__kmpc_dispatch_fini_8u
-    // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
-    // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-    // CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
-    llvm.call @body(%iv) : (i64) -> ()
-    omp.yield
-  }
-  llvm.return
+  omp.wsloop schedule(runtime) ordered(0) {
+    omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
+      // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 69, i64 1, i64 %{{.*}}, i64 1, i64 1)
+      // CHECK: call void @__kmpc_dispatch_fini_8u
+      // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
+      // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
+      // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+      llvm.call @body(%iv) : (i64) -> ()
+      omp.yield
+    }
+    omp.terminator
+  }
+  llvm.return
 }
 
 // -----
@@ -902,17 +950,19 @@ llvm.func @test_omp_wsloop_runtime_ordered(%lb : i64, %ub : i64, %step : i64) ->
 llvm.func @body(i64)
 
 llvm.func @test_omp_wsloop_guided_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
-  omp.wsloop schedule(guided) ordered(0)
-  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-    // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 68, i64 1, i64 %{{.*}}, i64 1, i64 1)
-    // CHECK: call void @__kmpc_dispatch_fini_8u
-    // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
-    // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-    // CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
-    llvm.call @body(%iv) : (i64) -> ()
-    omp.yield
-  }
-  llvm.return
+  omp.wsloop schedule(guided) ordered(0) {
+    omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
+      // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 68, i64 1, i64 %{{.*}}, i64 1, i64 1)
+      // CHECK: call void @__kmpc_dispatch_fini_8u
+      // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
+      // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
+      // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+      llvm.call @body(%iv) : (i64) -> ()
+      omp.yield
+    }
+    omp.terminator
+  }
+  llvm.return
 }
 
 // -----
@@ -920,17 +970,19 @@ llvm.func @test_omp_wsloop_guided_ordered(%lb : i64, %ub : i64, %step : i64) ->
 llvm.func @body(i64)
 
 llvm.func @test_omp_wsloop_dynamic_nonmonotonic_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
-  omp.wsloop schedule(dynamic, nonmonotonic) ordered(0)
-  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-    // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741891, i64 1, i64 %{{.*}}, i64 1, i64 1)
-    // CHECK: call void @__kmpc_dispatch_fini_8u
-    // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
-    // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-    // CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
-    llvm.call @body(%iv) : (i64) -> ()
-    omp.yield
-  }
-  llvm.return
+  omp.wsloop schedule(dynamic, nonmonotonic) ordered(0) {
+    omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
+      // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 1073741891, i64 1, i64 %{{.*}}, i64 1, i64 1)
+      // CHECK: call void @__kmpc_dispatch_fini_8u
+      // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
+      // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
+      // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+      llvm.call @body(%iv) : (i64) -> ()
+      omp.yield
+    }
+    omp.terminator
+  }
+  llvm.return
 }
 
 // -----
@@ -938,17 +990,19 @@ llvm.func @test_omp_wsloop_dynamic_nonmonotonic_ordered(%lb : i64, %ub : i64, %s
 llvm.func @body(i64)
 
 llvm.func @test_omp_wsloop_dynamic_monotonic_ordered(%lb : i64, %ub : i64, %step : i64) -> () {
-  omp.wsloop schedule(dynamic, monotonic) ordered(0)
-  for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-    // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 536870979, i64 1, i64 %{{.*}}, i64 1, i64 1)
-    // CHECK: call void @__kmpc_dispatch_fini_8u
-    // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
-    // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
-    // CHECK br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
-    llvm.call @body(%iv) : (i64) -> ()
-    omp.yield
-  }
-  llvm.return
+  omp.wsloop schedule(dynamic, monotonic) ordered(0) {
+    omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
+      // CHECK: call void @__kmpc_dispatch_init_8u(ptr @{{.*}}, i32 %{{.*}}, i32 536870979, i64 1, i64 %{{.*}}, i64 1, i64 1)
+      // CHECK: call void @__kmpc_dispatch_fini_8u
+      // CHECK: %[[continue:.*]] = call i32 @__kmpc_dispatch_next_8u
+      // CHECK: %[[cond:.*]] = icmp ne i32 %[[continue]], 0
+      // CHECK: br i1 %[[cond]], label %omp_loop.header{{.*}}, label %omp_loop.exit{{.*}}
+      llvm.call @body(%iv) : (i64) -> ()
+      omp.yield
+    }
+    omp.terminator
+  }
+  llvm.return
 }
 
 // -----
@@ -1114,14 +1168,16 @@ llvm.func @collapse_wsloop(
     // CHECK: %[[TOTAL_SUB_1:.*]] = sub i32 %[[TOTAL]], 1
     // CHECK: store i32 %[[TOTAL_SUB_1]], ptr
     // CHECK: call void @__kmpc_for_static_init_4u
-    omp.wsloop
-    for (%arg0, %arg1, %arg2) : i32 = (%0, %1, %2) to (%3, %4, %5) step (%6, %7, %8) {
-      %31 = llvm.load %20 : !llvm.ptr -> i32
-      %32 = llvm.add %31, %arg0 : i32
-      %33 = llvm.add %32, %arg1 : i32
-      %34 = llvm.add %33, %arg2 : i32
-      llvm.store %34, %20 : i32, !llvm.ptr
-      omp.yield
+    omp.wsloop {
+      omp.loop_nest (%arg0, %arg1, %arg2) : i32 = (%0, %1, %2) to (%3, %4, %5) step (%6, %7, %8) {
+        %31 = llvm.load %20 : !llvm.ptr -> i32
+        %32 = llvm.add %31, %arg0 : i32
+        %33 = llvm.add %32, %arg1 : i32
+        %34 = llvm.add %33, %arg2 : i32
+        llvm.store %34, %20 : i32, !llvm.ptr
+        omp.yield
+      }
+      omp.terminator
    }
    omp.terminator
  }
@@ -1175,14 +1231,16 @@ llvm.func @collapse_wsloop_dynamic(
     // CHECK: store i32 1, ptr
     // CHECK: store i32 %[[TOTAL]], ptr
     // CHECK: call void @__kmpc_dispatch_init_4u
-    omp.wsloop schedule(dynamic)
-    for (%arg0, %arg1, %arg2) : i32 = (%0, %1, %2) to (%3, %4, %5) step (%6, %7, %8) {
-      %31 = llvm.load %20 : !llvm.ptr -> i32
-      %32 = llvm.add %31, %arg0 : i32
-      %33 = llvm.add %32, %arg1 : i32
-      %34 = llvm.add %33, %arg2 : i32
-      llvm.store %34, %20 : i32, !llvm.ptr
-      omp.yield
+    omp.wsloop schedule(dynamic) {
+      omp.loop_nest (%arg0, %arg1, %arg2) : i32 = (%0, %1, %2) to (%3, %4, %5) step (%6, %7, %8) {
+        %31 = llvm.load %20 : !llvm.ptr -> i32
+        %32 = llvm.add %31, %arg0 : i32
+        %33 = llvm.add %32, %arg1 : i32
+        %34 = llvm.add %33, %arg2 : i32
+        llvm.store %34, %20 : i32, !llvm.ptr
+        omp.yield
+      }
+      omp.terminator
    }
    omp.terminator
  }
@@ -1207,63 +1265,69 @@ llvm.func @omp_ordered(%arg0 : i32, %arg1 : i32, %arg2 : i32, %arg3 : i64,
     // CHECK: call void @__kmpc_end_ordered(ptr @[[GLOB1]], i32 [[OMP_THREAD]])
   }
 
-  omp.wsloop ordered(0)
-  for (%arg7) : i32 = (%arg0) to (%arg1) step (%arg2) {
-    // CHECK: call void @__kmpc_ordered(ptr @[[GLOB3:[0-9]+]], i32 [[OMP_THREAD2:%.*]])
-    omp.ordered.region {
-      omp.terminator
-      // CHECK: call void @__kmpc_end_ordered(ptr @[[GLOB3]], i32 [[OMP_THREAD2]])
+  omp.wsloop ordered(0) {
+    omp.loop_nest (%arg7) : i32 = (%arg0) to (%arg1) step (%arg2) {
+      // CHECK: call void @__kmpc_ordered(ptr @[[GLOB3:[0-9]+]], i32 [[OMP_THREAD2:%.*]])
+      omp.ordered.region {
+        omp.terminator
+        // CHECK: call void @__kmpc_end_ordered(ptr @[[GLOB3]], i32 [[OMP_THREAD2]])
+      }
+      omp.yield
    }
-    omp.yield
+    omp.terminator
  }
 
-  omp.wsloop ordered(1)
-  for (%arg7) : i32 = (%arg0) to (%arg1) step (%arg2) {
-    // CHECK: [[TMP:%.*]] = getelementptr inbounds [1 x i64], ptr [[ADDR]], i64 0, i64 0
-    // CHECK: store i64 [[ARG0:%.*]], ptr [[TMP]], align 8
-    // CHECK: [[TMP2:%.*]] = getelementptr inbounds [1 x i64], ptr [[ADDR]], i64 0, i64 0
-    // CHECK: [[OMP_THREAD2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3:[0-9]+]])
-    // CHECK: call void @__kmpc_doacross_wait(ptr @[[GLOB3]], i32 [[OMP_THREAD2]], ptr [[TMP2]])
-    omp.ordered depend_type(dependsink) depend_vec(%arg3 : i64) {num_loops_val = 1 : i64}
+  omp.wsloop ordered(1) {
+    omp.loop_nest (%arg7) : i32 = (%arg0) to (%arg1) step (%arg2) {
+      // CHECK: [[TMP:%.*]] = getelementptr inbounds [1 x i64], ptr [[ADDR]], i64 0, i64 0
+      // CHECK: store i64 [[ARG0:%.*]], ptr [[TMP]], align 8
+      // CHECK: [[TMP2:%.*]] = getelementptr inbounds [1 x i64], ptr [[ADDR]], i64 0, i64 0
+      // CHECK: [[OMP_THREAD2:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3:[0-9]+]])
+      // CHECK: call void @__kmpc_doacross_wait(ptr @[[GLOB3]], i32 [[OMP_THREAD2]], ptr [[TMP2]])
+      omp.ordered depend_type(dependsink) depend_vec(%arg3 : i64) {num_loops_val = 1 : i64}
 
-    // CHECK: [[TMP3:%.*]] = getelementptr inbounds [1 x i64], ptr [[ADDR3]], i64 0, i64 0
-    // CHECK: store i64 [[ARG0]], ptr [[TMP3]], align 8
-    // CHECK: [[TMP4:%.*]] = getelementptr inbounds [1 x i64], ptr [[ADDR3]], i64 0, i64 0
-    // CHECK: [[OMP_THREAD4:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB5:[0-9]+]])
-    // CHECK: call void @__kmpc_doacross_post(ptr @[[GLOB5]], i32 [[OMP_THREAD4]], ptr [[TMP4]])
-    omp.ordered depend_type(dependsource) depend_vec(%arg3 : i64) {num_loops_val = 1 : i64}
+      // CHECK: [[TMP3:%.*]] = getelementptr inbounds [1 x i64], ptr [[ADDR3]], i64 0, i64 0
+      // CHECK: store i64 [[ARG0]], ptr [[TMP3]], align 8
+      // CHECK: [[TMP4:%.*]] = getelementptr inbounds [1 x i64], ptr [[ADDR3]], i64 0, i64 0
+      // CHECK: [[OMP_THREAD4:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB5:[0-9]+]])
+      // CHECK: call void @__kmpc_doacross_post(ptr @[[GLOB5]], i32 [[OMP_THREAD4]], ptr [[TMP4]])
+      omp.ordered depend_type(dependsource) depend_vec(%arg3 : i64) {num_loops_val = 1 : i64}
 
-    omp.yield
+      omp.yield
+    }
+    omp.terminator
  }
 
-  omp.wsloop ordered(2)
-  for (%arg7) : i32 = (%arg0) to (%arg1) step (%arg2) {
-    // CHECK: [[TMP5:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR5]], i64 0, i64 0
-    // CHECK: store i64 [[ARG0]], ptr [[TMP5]], align 8
-    // CHECK: [[TMP6:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR5]], i64 0, i64 1
-    // CHECK: store i64 [[ARG1:%.*]], ptr [[TMP6]], align 8
-    // CHECK: [[TMP7:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR5]], i64 0, i64 0
-    // CHECK: [[OMP_THREAD6:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB7:[0-9]+]])
-    // CHECK: call void @__kmpc_doacross_wait(ptr @[[GLOB7]], i32 [[OMP_THREAD6]], ptr [[TMP7]])
-    // CHECK: [[TMP8:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR7]], i64 0, i64 0
-    // CHECK: store i64 [[ARG2:%.*]], ptr [[TMP8]], align 8
-    // CHECK: [[TMP9:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR7]], i64 0, i64 1
-    // CHECK: store i64 [[ARG3:%.*]], ptr [[TMP9]], align 8
-    // CHECK: [[TMP10:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR7]], i64 0, i64 0
-    // CHECK: [[OMP_THREAD8:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB7]])
-    // CHECK: call void @__kmpc_doacross_wait(ptr @[[GLOB7]], i32 [[OMP_THREAD8]], ptr [[TMP10]])
-    omp.ordered depend_type(dependsink) depend_vec(%arg3, %arg4, %arg5, %arg6 : i64, i64, i64, i64) {num_loops_val = 2 : i64}
+  omp.wsloop ordered(2) {
+    omp.loop_nest (%arg7) : i32 = (%arg0) to (%arg1) step (%arg2) {
+      // CHECK: [[TMP5:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR5]], i64 0, i64 0
+      // CHECK: store i64 [[ARG0]], ptr [[TMP5]], align 8
+      // CHECK: [[TMP6:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR5]], i64 0, i64 1
+      // CHECK: store i64 [[ARG1:%.*]], ptr [[TMP6]], align 8
+      // CHECK: [[TMP7:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR5]], i64 0, i64 0
+      // CHECK: [[OMP_THREAD6:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB7:[0-9]+]])
+      // CHECK: call void @__kmpc_doacross_wait(ptr @[[GLOB7]], i32 [[OMP_THREAD6]], ptr [[TMP7]])
+      // CHECK: [[TMP8:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR7]], i64 0, i64 0
+      // CHECK: store i64 [[ARG2:%.*]], ptr [[TMP8]], align 8
+      // CHECK: [[TMP9:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR7]], i64 0, i64 1
+      // CHECK: store i64 [[ARG3:%.*]], ptr [[TMP9]], align 8
+      // CHECK: [[TMP10:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR7]], i64 0, i64 0
+      // CHECK: [[OMP_THREAD8:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB7]])
+      // CHECK: call void @__kmpc_doacross_wait(ptr @[[GLOB7]], i32 [[OMP_THREAD8]], ptr [[TMP10]])
+      omp.ordered depend_type(dependsink) depend_vec(%arg3, %arg4, %arg5, %arg6 : i64, i64, i64, i64) {num_loops_val = 2 : i64}
+
+      // CHECK: [[TMP11:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR9]], i64 0, i64 0
+      // CHECK: store i64 [[ARG0]], ptr [[TMP11]], align 8
+      // CHECK: [[TMP12:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR9]], i64 0, i64 1
+      // CHECK: store i64 [[ARG1]], ptr [[TMP12]], align 8
+      // CHECK: [[TMP13:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR9]], i64 0, i64 0
+      // CHECK: [[OMP_THREAD10:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB9:[0-9]+]])
+      // CHECK: call void @__kmpc_doacross_post(ptr @[[GLOB9]], i32 [[OMP_THREAD10]], ptr [[TMP13]])
+      omp.ordered depend_type(dependsource) depend_vec(%arg3, %arg4 : i64, i64) {num_loops_val = 2 : i64}
-    // CHECK: [[TMP11:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR9]], i64 0, i64 0
-    // CHECK: store i64 [[ARG0]], ptr [[TMP11]], align 8
-    // CHECK: [[TMP12:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR9]], i64 0, i64 1
-    // CHECK: store i64 [[ARG1]], ptr [[TMP12]], align 8
-    // CHECK: [[TMP13:%.*]] = getelementptr inbounds [2 x i64], ptr [[ADDR9]], i64 0, i64 0
-    // CHECK: [[OMP_THREAD10:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB9:[0-9]+]])
-    // CHECK: call void @__kmpc_doacross_post(ptr @[[GLOB9]], i32 [[OMP_THREAD10]], ptr [[TMP13]])
-    omp.ordered depend_type(dependsource) depend_vec(%arg3, %arg4 : i64, i64) {num_loops_val = 2 : i64}
-
-    omp.yield
+      omp.yield
+    }
+    omp.terminator
  }
 
  llvm.return
@@ -2133,10 +2197,13 @@ llvm.func @omp_sections_with_clauses() -> () {
 // introduction mechanism itself is tested elsewhere.
 // CHECK-LABEL: @repeated_successor
 llvm.func @repeated_successor(%arg0: i64, %arg1: i64, %arg2: i64, %arg3: i1) {
-  omp.wsloop for (%arg4) : i64 = (%arg0) to (%arg1) step (%arg2) {
-    llvm.cond_br %arg3, ^bb1(%arg0 : i64), ^bb1(%arg1 : i64)
-  ^bb1(%0: i64):  // 2 preds: ^bb0, ^bb0
-    omp.yield
+  omp.wsloop {
+    omp.loop_nest (%arg4) : i64 = (%arg0) to (%arg1) step (%arg2) {
+      llvm.cond_br %arg3, ^bb1(%arg0 : i64), ^bb1(%arg1 : i64)
+    ^bb1(%0: i64):  // 2 preds: ^bb0, ^bb0
+      omp.yield
+    }
+    omp.terminator
  }
  llvm.return
 }
diff --git a/mlir/test/Target/LLVMIR/openmp-nested.mlir b/mlir/test/Target/LLVMIR/openmp-nested.mlir
index e1fdfdd24a3cb..ce5f22f10d7dc 100644
--- a/mlir/test/Target/LLVMIR/openmp-nested.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-nested.mlir
@@ -11,20 +11,26 @@ module {
       %2 = llvm.mlir.constant(0 : index) : i64
       %4 = llvm.mlir.constant(0 : i32) : i32
       %12 = llvm.alloca %0 x i64 : (i64) -> !llvm.ptr
-      omp.wsloop for (%arg2) : i64 = (%2) to (%1) step (%0) {
-        omp.parallel {
-          omp.wsloop for (%arg3) : i64 = (%2) to (%0) step (%0) {
-            llvm.store %2, %12 : i64, !llvm.ptr
-            omp.yield
+      omp.wsloop {
+        omp.loop_nest (%arg2) : i64 = (%2) to (%1) step (%0) {
+          omp.parallel {
+            omp.wsloop {
+              omp.loop_nest (%arg3) : i64 = (%2) to (%0) step (%0) {
+                llvm.store %2, %12 : i64, !llvm.ptr
+                omp.yield
+              }
+              omp.terminator
+            }
+            omp.terminator
          }
-          omp.terminator
+          %19 = llvm.load %12 : !llvm.ptr -> i64
+          %20 = llvm.trunc %19 : i64 to i32
+          %5 = llvm.mlir.addressof @str0 : !llvm.ptr
+          %6 = llvm.getelementptr %5[%4, %4] : (!llvm.ptr, i32, i32) -> !llvm.ptr, !llvm.array<29 x i8>
+          %21 = llvm.call @printf(%6, %20, %20) vararg(!llvm.func<i32 (ptr, ...)>): (!llvm.ptr, i32, i32) -> i32
+          omp.yield
        }
-        %19 = llvm.load %12 : !llvm.ptr -> i64
-        %20 = llvm.trunc %19 : i64 to i32
-        %5 = llvm.mlir.addressof @str0 : !llvm.ptr
-        %6 = llvm.getelementptr %5[%4, %4] : (!llvm.ptr, i32, i32) -> !llvm.ptr, !llvm.array<29 x i8>
-        %21 = llvm.call @printf(%6, %20, %20) vararg(!llvm.func<i32 (ptr, ...)>): (!llvm.ptr, i32, i32) -> i32
-        omp.yield
+        omp.terminator
      }
      omp.terminator
    }
diff --git a/mlir/test/Target/LLVMIR/openmp-reduction.mlir b/mlir/test/Target/LLVMIR/openmp-reduction.mlir
index 39b64d71a2274..bfdad8c19335e 100644
--- a/mlir/test/Target/LLVMIR/openmp-reduction.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-reduction.mlir
@@ -26,13 +26,15 @@ llvm.func @simple_reduction(%lb : i64, %ub : i64, %step : i64) {
   %c1 = llvm.mlir.constant(1 : i32) : i32
   %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
   omp.parallel {
-    omp.wsloop reduction(@add_f32 %0 -> %prv : !llvm.ptr)
-    for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-      %1 = llvm.mlir.constant(2.0 : f32) : f32
-      %2 = llvm.load %prv : !llvm.ptr -> f32
-      %3 = llvm.fadd %1, %2 : f32
-      llvm.store %3, %prv : f32, !llvm.ptr
-      omp.yield
+    omp.wsloop reduction(@add_f32 %0 -> %prv : !llvm.ptr) {
+      omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
+        %1 = llvm.mlir.constant(2.0 : f32) : f32
+        %2 = llvm.load %prv : !llvm.ptr -> f32
+        %3 = llvm.fadd %1, %2 : f32
+        llvm.store %3, %prv : f32, !llvm.ptr
+        omp.yield
+      }
+      omp.terminator
    }
    omp.terminator
  }
@@ -105,16 +107,18 @@ llvm.func @reuse_declaration(%lb : i64, %ub : i64, %step : i64) {
   %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
   %2 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
   omp.parallel {
-    omp.wsloop reduction(@add_f32 %0 -> %prv0 : !llvm.ptr, @add_f32 %2 -> %prv1 : !llvm.ptr)
-    for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-      %1 = llvm.mlir.constant(2.0 : f32) : f32
-      %3 = llvm.load %prv0 : !llvm.ptr -> f32
-      %4 = llvm.fadd %3, %1 : f32
-      llvm.store %4, %prv0 : f32, !llvm.ptr
-      %5 = llvm.load %prv1 : !llvm.ptr -> f32
-      %6 = llvm.fadd %5, %1 : f32
-      llvm.store %6, %prv1 : f32, !llvm.ptr
-      omp.yield
+    omp.wsloop reduction(@add_f32 %0 -> %prv0 : !llvm.ptr, @add_f32 %2 -> %prv1 : !llvm.ptr) {
+      omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
+        %1 = llvm.mlir.constant(2.0 : f32) : f32
+        %3 = llvm.load %prv0 : !llvm.ptr -> f32
+        %4 = llvm.fadd %3, %1 : f32
+        llvm.store %4, %prv0 : f32, !llvm.ptr
+        %5 = llvm.load %prv1 : !llvm.ptr -> f32
+        %6 = llvm.fadd %5, %1 : f32
+        llvm.store %6, %prv1 : f32, !llvm.ptr
+        omp.yield
+      }
+      omp.terminator
    }
    omp.terminator
  }
@@ -195,13 +199,15 @@ llvm.func @missing_omp_reduction(%lb : i64, %ub : i64, %step : i64) {
   %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
   %2 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
   omp.parallel {
-    omp.wsloop reduction(@add_f32 %0 -> %prv0 : !llvm.ptr, @add_f32 %2 -> %prv1 : !llvm.ptr)
-    for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-      %1 = llvm.mlir.constant(2.0 : f32) : f32
-      %3 = llvm.load %prv0 : !llvm.ptr -> f32
-      %4 = llvm.fadd %3, %1 : f32
-      llvm.store %4, %prv0 : f32, !llvm.ptr
-      omp.yield
+    omp.wsloop reduction(@add_f32 %0 -> %prv0 : !llvm.ptr, @add_f32 %2 -> %prv1 : !llvm.ptr) {
+      omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
+        %1 = llvm.mlir.constant(2.0 : f32) : f32
+        %3 = llvm.load %prv0 : !llvm.ptr -> f32
+        %4 = llvm.fadd %3, %1 : f32
+        llvm.store %4, %prv0 : f32, !llvm.ptr
+        omp.yield
+      }
+      omp.terminator
    }
    omp.terminator
  }
@@ -280,16 +286,18 @@ llvm.func @double_reference(%lb : i64, %ub : i64, %step : i64) {
   %c1 = llvm.mlir.constant(1 : i32) : i32
   %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
   omp.parallel {
-    omp.wsloop reduction(@add_f32 %0 -> %prv : !llvm.ptr)
-    for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-      %1 = llvm.mlir.constant(2.0 : f32) : f32
-      %2 = llvm.load %prv : !llvm.ptr -> f32
-      %3 = llvm.fadd %2, %1 : f32
-      llvm.store %3, %prv : f32, !llvm.ptr
-      %4 = llvm.load %prv : !llvm.ptr -> f32
-      %5 = llvm.fadd %4, %1 : f32
-      llvm.store %5, %prv : f32, !llvm.ptr
-      omp.yield
+    omp.wsloop reduction(@add_f32 %0 -> %prv : !llvm.ptr) {
+      omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
+        %1 = llvm.mlir.constant(2.0 : f32) : f32
+        %2 = llvm.load %prv : !llvm.ptr -> f32
+        %3 = llvm.fadd %2, %1 : f32
+        llvm.store %3, %prv : f32, !llvm.ptr
+        %4 = llvm.load %prv : !llvm.ptr -> f32
+        %5 = llvm.fadd %4, %1 : f32
+        llvm.store %5, %prv : f32, !llvm.ptr
+        omp.yield
+      }
+      omp.terminator
    }
    omp.terminator
  }
@@ -374,16 +382,18 @@ llvm.func @no_atomic(%lb : i64, %ub : i64, %step : i64) {
   %0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
   %2 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
   omp.parallel {
-    omp.wsloop reduction(@add_f32 %0 -> %prv0 : !llvm.ptr, @mul_f32 %2 -> %prv1 : !llvm.ptr)
-    for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-      %1 = llvm.mlir.constant(2.0 : f32) : f32
-      %3 = llvm.load %prv0 : !llvm.ptr -> f32
-      %4 = llvm.fadd %3, %1 : f32
-      llvm.store %4, %prv0 : f32, !llvm.ptr
-      %5 = llvm.load %prv1 : !llvm.ptr -> f32
-      %6 = llvm.fmul %5, %1 : f32
-      llvm.store %6, %prv1 : f32, !llvm.ptr
-      omp.yield
+    omp.wsloop reduction(@add_f32 %0 -> %prv0 : !llvm.ptr, @mul_f32 %2 -> %prv1 : !llvm.ptr) {
+      omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
+        %1 = llvm.mlir.constant(2.0 : f32) : f32
+        %3 = llvm.load %prv0 : !llvm.ptr -> f32
+        %4 = llvm.fadd %3, %1 : f32
+        llvm.store %4, %prv0 : f32, !llvm.ptr
+        %5 = llvm.load %prv1 : !llvm.ptr -> f32
+        %6 = llvm.fmul %5, %1 : f32
+        llvm.store %6, %prv1 : f32, !llvm.ptr
+        omp.yield
+      }
+      omp.terminator
    }
    omp.terminator
  }
@@ -531,12 +541,15 @@ llvm.func @parallel_nested_workshare_reduction(%ub : i64) {
   %step = llvm.mlir.constant(1 : i64) : i64
 
   omp.parallel reduction(@add_i32 %0 -> %prv : !llvm.ptr) {
-    omp.wsloop for (%iv) : i64 = (%lb) to (%ub) step (%step) {
-      %ival = llvm.trunc %iv : i64 to i32
-      %lprv = llvm.load %prv : !llvm.ptr -> i32
-      %add = llvm.add %lprv, %ival : i32
-      llvm.store %add, %prv : i32, !llvm.ptr
-      omp.yield
+    omp.wsloop {
+      omp.loop_nest (%iv) : i64 = (%lb) to (%ub) step (%step) {
+        %ival = llvm.trunc %iv : i64 to i32
+        %lprv = llvm.load %prv : !llvm.ptr -> i32
+        %add = llvm.add %lprv, %ival : i32
+        llvm.store %add, %prv : i32, !llvm.ptr
+        omp.yield
+      }
+      omp.terminator
    }
    omp.terminator
  }
diff --git a/mlir/test/Target/LLVMIR/openmp-wsloop-reduction-cleanup.mlir b/mlir/test/Target/LLVMIR/openmp-wsloop-reduction-cleanup.mlir
index 3842522934e48..7a1a31830ce9b 100644
--- a/mlir/test/Target/LLVMIR/openmp-wsloop-reduction-cleanup.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-wsloop-reduction-cleanup.mlir
@@ -30,9 +30,12 @@
   %loop_ub = llvm.mlir.constant(9 : i32) : i32
   %loop_lb = llvm.mlir.constant(0 : i32) : i32
   %loop_step = llvm.mlir.constant(1 : i32) : i32
-  omp.wsloop byref reduction(@add_reduction_i_32 %1 -> %arg0 : !llvm.ptr, @add_reduction_i_32 %2 -> %arg1 : !llvm.ptr) for (%loop_cnt) : i32 = (%loop_lb) to (%loop_ub) inclusive step (%loop_step) {
-    llvm.store %0, %arg0 : i32, !llvm.ptr
-    llvm.store %0, %arg1 : i32, !llvm.ptr
+  omp.wsloop byref reduction(@add_reduction_i_32 %1 -> %arg0 : !llvm.ptr, @add_reduction_i_32 %2 -> %arg1 : !llvm.ptr) {
+    omp.loop_nest (%loop_cnt) : i32 = (%loop_lb) to (%loop_ub) inclusive step (%loop_step) {
+      llvm.store %0, %arg0 : i32, !llvm.ptr
+      llvm.store %0, %arg1 : i32, !llvm.ptr
+      omp.yield
+    }
    omp.terminator
  }
  llvm.return