From b0d76b0f7428f423b07c9ea2d191bd5444e158cd Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Wed, 31 Jul 2024 14:09:09 +0900 Subject: [PATCH 01/34] [MLIR][omp] Add omp.workshare op Add custom omp loop wrapper Add recursive memory effects trait to workshare Remove stray include Remove omp.workshare verifier Add assembly format for wrapper and add test Add verification and descriptions --- .../Dialect/OpenMP/OpenMPClauseOperands.h | 73 ++++++++++++++++++- mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td | 43 +++++++++++ mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp | 23 ++++++ mlir/test/Dialect/OpenMP/invalid.mlir | 42 +++++++++++ mlir/test/Dialect/OpenMP/ops.mlir | 69 ++++++++++++++++++ 5 files changed, 246 insertions(+), 4 deletions(-) diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPClauseOperands.h b/mlir/include/mlir/Dialect/OpenMP/OpenMPClauseOperands.h index 1247a871f93c6..2223636c0d7a0 100644 --- a/mlir/include/mlir/Dialect/OpenMP/OpenMPClauseOperands.h +++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPClauseOperands.h @@ -44,10 +44,75 @@ struct DeviceTypeClauseOps { // TODO: Add `indirect` clause. using DeclareTargetOperands = detail::Clauses; -/// omp.target_enter_data, omp.target_exit_data and omp.target_update take the -/// same clauses, so we give the structure to be shared by all of them a -/// representative name. -using TargetEnterExitUpdateDataOperands = TargetEnterDataOperands; +using DistributeOperands = + detail::Clauses; + +using LoopNestOperands = detail::Clauses; + +using MaskedOperands = detail::Clauses; + +using OrderedOperands = detail::Clauses; + +using OrderedRegionOperands = detail::Clauses; + +using ParallelOperands = + detail::Clauses; + +using SectionsOperands = detail::Clauses; + +using SimdOperands = + detail::Clauses; + +using SingleOperands = detail::Clauses; + +// TODO `defaultmap`, `uses_allocators` clauses. +using TargetOperands = + detail::Clauses; + +using TargetDataOperands = + detail::Clauses; + +using TargetEnterExitUpdateDataOperands = + detail::Clauses; + +// TODO `affinity`, `detach` clauses. 
+using TaskOperands = + detail::Clauses; + +using TaskgroupOperands = + detail::Clauses; + +using TaskloopOperands = + detail::Clauses; + +using TaskwaitOperands = detail::Clauses; + +using TeamsOperands = + detail::Clauses; + +using WorkshareOperands = detail::Clauses; + +using WsloopOperands = + detail::Clauses; } // namespace omp } // namespace mlir diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td index 45313200d4f0b..71908b6112e3e 100644 --- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td +++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td @@ -313,6 +313,49 @@ def SingleOp : OpenMP_Op<"single", traits = [ let hasVerifier = 1; } +//===----------------------------------------------------------------------===// +// 2.8.3 Workshare Construct +//===----------------------------------------------------------------------===// + +def WorkshareOp : OpenMP_Op<"workshare", traits = [ + RecursiveMemoryEffects, + ], clauses = [ + OpenMP_NowaitClause, + ], singleRegion = true> { + let summary = "workshare directive"; + let description = [{ + The workshare construct divides the execution of the enclosed structured + block into separate units of work, and causes the threads of the team to + share the work such that each unit is executed only once by one thread, in + the context of its implicit task + + This operation is used for the intermediate representation of the workshare + block before the work gets divided between the threads. See the flang + LowerWorkshare pass for details. + }] # clausesDescription; + + let builders = [ + OpBuilder<(ins CArg<"const WorkshareOperands &">:$clauses)> + ]; +} + +def WorkshareLoopWrapperOp : OpenMP_Op<"workshare.loop_wrapper", traits = [ + DeclareOpInterfaceMethods, + RecursiveMemoryEffects, SingleBlock + ], singleRegion = true> { + let summary = "contains loop nests to be parallelized by workshare"; + let description = [{ + This operation wraps a loop nest that is marked for dividing into units of + work by an encompassing omp.workshare operation. 
+ }]; + + let builders = [ + OpBuilder<(ins), [{ build($_builder, $_state, {}); }]> + ]; + let assemblyFormat = "$region attr-dict"; + let hasVerifier = 1; +} + //===----------------------------------------------------------------------===// // Loop Nest //===----------------------------------------------------------------------===// diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp index e1df647d6a3c7..cbb8fe36eb24e 100644 --- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp +++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp @@ -1921,6 +1921,29 @@ LogicalResult SingleOp::verify() { getCopyprivateSyms()); } +//===----------------------------------------------------------------------===// +// WorkshareOp +//===----------------------------------------------------------------------===// + +void WorkshareOp::build(OpBuilder &builder, OperationState &state, + const WorkshareOperands &clauses) { + WorkshareOp::build(builder, state, clauses.nowait); +} + +//===----------------------------------------------------------------------===// +// WorkshareLoopWrapperOp +//===----------------------------------------------------------------------===// + +LogicalResult WorkshareLoopWrapperOp::verify() { + if (!isWrapper()) + return emitOpError() << "must be a loop wrapper"; + if (getNestedWrapper()) + return emitError() << "nested wrappers not supported"; + if (!(*this)->getParentOfType()) + return emitError() << "must be nested in an omp.workshare"; + return success(); +} + //===----------------------------------------------------------------------===// // LoopWrapperInterface //===----------------------------------------------------------------------===// diff --git a/mlir/test/Dialect/OpenMP/invalid.mlir b/mlir/test/Dialect/OpenMP/invalid.mlir index fd89ec31c64a6..e8482574408c0 100644 --- a/mlir/test/Dialect/OpenMP/invalid.mlir +++ b/mlir/test/Dialect/OpenMP/invalid.mlir @@ -2577,3 +2577,45 @@ func.func @omp_taskloop_invalid_composite(%lb: index, %ub: index, %step: index) } {omp.composite} return } + +// ----- +func.func @nested_wrapper(%idx : index) { + omp.workshare { + // expected-error @below {{nested wrappers not supported}} + omp.workshare.loop_wrapper { + omp.simd { + omp.loop_nest (%iv) : index = (%idx) to (%idx) step (%idx) { + omp.yield + } + omp.terminator + } + omp.terminator + } + omp.terminator + } + return +} + +// ----- +func.func @not_wrapper() { + omp.workshare { + // expected-error @below {{must be a loop wrapper}} + omp.workshare.loop_wrapper { + omp.terminator + } + omp.terminator + } + return +} + +// ----- +func.func @missing_workshare(%idx : index) { + // expected-error @below {{must be nested in an omp.workshare}} + omp.workshare.loop_wrapper { + omp.loop_nest (%iv) : index = (%idx) to (%idx) step (%idx) { + omp.yield + } + omp.terminator + } + return +} diff --git a/mlir/test/Dialect/OpenMP/ops.mlir b/mlir/test/Dialect/OpenMP/ops.mlir index 6f11b451fa00a..d839db9d7583e 100644 --- a/mlir/test/Dialect/OpenMP/ops.mlir +++ b/mlir/test/Dialect/OpenMP/ops.mlir @@ -2749,3 +2749,72 @@ func.func @omp_target_private(%map1: memref, %map2: memref, %priv_ return } + +// CHECK-LABEL: func @omp_workshare +func.func @omp_workshare() { + // CHECK: omp.workshare { + omp.workshare { + "test.payload"() : () -> () + // CHECK: omp.terminator + omp.terminator + } + return +} + +// CHECK-LABEL: func @omp_workshare_nowait +func.func @omp_workshare_nowait() { + // CHECK: omp.workshare nowait { + omp.workshare nowait { + "test.payload"() : () -> () + // 
CHECK: omp.terminator + omp.terminator + } + return +} + +// CHECK-LABEL: func @omp_workshare_multiple_blocks +func.func @omp_workshare_multiple_blocks() { + // CHECK: omp.workshare { + omp.workshare { + cf.br ^bb2 + ^bb2: + // CHECK: omp.terminator + omp.terminator + } + return +} + +// CHECK-LABEL: func @omp_workshare.loop_wrapper +func.func @omp_workshare.loop_wrapper(%idx : index) { + // CHECK-NEXT: omp.workshare { + omp.workshare { + // CHECK-NEXT: omp.workshare.loop_wrapper + omp.workshare.loop_wrapper { + // CHECK-NEXT: omp.loop_nest + omp.loop_nest (%iv) : index = (%idx) to (%idx) step (%idx) { + omp.yield + } + omp.terminator + } + omp.terminator + } + return +} + +// CHECK-LABEL: func @omp_workshare.loop_wrapper_attrs +func.func @omp_workshare.loop_wrapper_attrs(%idx : index) { + // CHECK-NEXT: omp.workshare { + omp.workshare { + // CHECK-NEXT: omp.workshare.loop_wrapper { + omp.workshare.loop_wrapper { + // CHECK-NEXT: omp.loop_nest + omp.loop_nest (%iv) : index = (%idx) to (%idx) step (%idx) { + omp.yield + } + omp.terminator + // CHECK: } {attr_in_dict} + } {attr_in_dict} + omp.terminator + } + return +} From 88d6d03f1ca0e9769eb94e0c620e7357dec2e3e1 Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Thu, 22 Aug 2024 18:05:31 +0900 Subject: [PATCH 02/34] wrong replace --- mlir/test/Dialect/OpenMP/ops.mlir | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mlir/test/Dialect/OpenMP/ops.mlir b/mlir/test/Dialect/OpenMP/ops.mlir index d839db9d7583e..c4d4d5055706e 100644 --- a/mlir/test/Dialect/OpenMP/ops.mlir +++ b/mlir/test/Dialect/OpenMP/ops.mlir @@ -2784,8 +2784,8 @@ func.func @omp_workshare_multiple_blocks() { return } -// CHECK-LABEL: func @omp_workshare.loop_wrapper -func.func @omp_workshare.loop_wrapper(%idx : index) { +// CHECK-LABEL: func @omp_workshare_loop_wrapper +func.func @omp_workshare_loop_wrapper(%idx : index) { // CHECK-NEXT: omp.workshare { omp.workshare { // CHECK-NEXT: omp.workshare.loop_wrapper @@ -2801,8 +2801,8 @@ func.func @omp_workshare.loop_wrapper(%idx : index) { return } -// CHECK-LABEL: func @omp_workshare.loop_wrapper_attrs -func.func @omp_workshare.loop_wrapper_attrs(%idx : index) { +// CHECK-LABEL: func @omp_workshare_loop_wrapper_attrs +func.func @omp_workshare_loop_wrapper_attrs(%idx : index) { // CHECK-NEXT: omp.workshare { omp.workshare { // CHECK-NEXT: omp.workshare.loop_wrapper { From f8d7b474aeea8123f4cfdae805b14bdfa569db85 Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Sun, 20 Oct 2024 01:33:13 +0900 Subject: [PATCH 03/34] Fix wsloopwrapperop --- .../Dialect/OpenMP/OpenMPClauseOperands.h | 73 +------------------ mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td | 2 +- mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp | 4 - mlir/test/Dialect/OpenMP/ops.mlir | 2 - 4 files changed, 5 insertions(+), 76 deletions(-) diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPClauseOperands.h b/mlir/include/mlir/Dialect/OpenMP/OpenMPClauseOperands.h index 2223636c0d7a0..1247a871f93c6 100644 --- a/mlir/include/mlir/Dialect/OpenMP/OpenMPClauseOperands.h +++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPClauseOperands.h @@ -44,75 +44,10 @@ struct DeviceTypeClauseOps { // TODO: Add `indirect` clause. 
using DeclareTargetOperands = detail::Clauses; -using DistributeOperands = - detail::Clauses; - -using LoopNestOperands = detail::Clauses; - -using MaskedOperands = detail::Clauses; - -using OrderedOperands = detail::Clauses; - -using OrderedRegionOperands = detail::Clauses; - -using ParallelOperands = - detail::Clauses; - -using SectionsOperands = detail::Clauses; - -using SimdOperands = - detail::Clauses; - -using SingleOperands = detail::Clauses; - -// TODO `defaultmap`, `uses_allocators` clauses. -using TargetOperands = - detail::Clauses; - -using TargetDataOperands = - detail::Clauses; - -using TargetEnterExitUpdateDataOperands = - detail::Clauses; - -// TODO `affinity`, `detach` clauses. -using TaskOperands = - detail::Clauses; - -using TaskgroupOperands = - detail::Clauses; - -using TaskloopOperands = - detail::Clauses; - -using TaskwaitOperands = detail::Clauses; - -using TeamsOperands = - detail::Clauses; - -using WorkshareOperands = detail::Clauses; - -using WsloopOperands = - detail::Clauses; +/// omp.target_enter_data, omp.target_exit_data and omp.target_update take the +/// same clauses, so we give the structure to be shared by all of them a +/// representative name. +using TargetEnterExitUpdateDataOperands = TargetEnterDataOperands; } // namespace omp } // namespace mlir diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td index 71908b6112e3e..c8e2c9ed4fd61 100644 --- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td +++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td @@ -340,7 +340,7 @@ def WorkshareOp : OpenMP_Op<"workshare", traits = [ } def WorkshareLoopWrapperOp : OpenMP_Op<"workshare.loop_wrapper", traits = [ - DeclareOpInterfaceMethods, + DeclareOpInterfaceMethods, NoTerminator, RecursiveMemoryEffects, SingleBlock ], singleRegion = true> { let summary = "contains loop nests to be parallelized by workshare"; diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp index cbb8fe36eb24e..e849f68e9c838 100644 --- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp +++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp @@ -1935,10 +1935,6 @@ void WorkshareOp::build(OpBuilder &builder, OperationState &state, //===----------------------------------------------------------------------===// LogicalResult WorkshareLoopWrapperOp::verify() { - if (!isWrapper()) - return emitOpError() << "must be a loop wrapper"; - if (getNestedWrapper()) - return emitError() << "nested wrappers not supported"; if (!(*this)->getParentOfType()) return emitError() << "must be nested in an omp.workshare"; return success(); diff --git a/mlir/test/Dialect/OpenMP/ops.mlir b/mlir/test/Dialect/OpenMP/ops.mlir index c4d4d5055706e..0532b71307a61 100644 --- a/mlir/test/Dialect/OpenMP/ops.mlir +++ b/mlir/test/Dialect/OpenMP/ops.mlir @@ -2794,7 +2794,6 @@ func.func @omp_workshare_loop_wrapper(%idx : index) { omp.loop_nest (%iv) : index = (%idx) to (%idx) step (%idx) { omp.yield } - omp.terminator } omp.terminator } @@ -2811,7 +2810,6 @@ func.func @omp_workshare_loop_wrapper_attrs(%idx : index) { omp.loop_nest (%iv) : index = (%idx) to (%idx) step (%idx) { omp.yield } - omp.terminator // CHECK: } {attr_in_dict} } {attr_in_dict} omp.terminator From d001eec514d0e7104a2279165e76a0c1694174a1 Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Sun, 20 Oct 2024 02:22:08 +0900 Subject: [PATCH 04/34] Fix op tests --- mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp | 2 ++ mlir/test/Dialect/OpenMP/invalid.mlir | 11 ++++------- 2 
files changed, 6 insertions(+), 7 deletions(-) diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp index e849f68e9c838..43c1ec66e1ae3 100644 --- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp +++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp @@ -1937,6 +1937,8 @@ void WorkshareOp::build(OpBuilder &builder, OperationState &state, LogicalResult WorkshareLoopWrapperOp::verify() { if (!(*this)->getParentOfType()) return emitError() << "must be nested in an omp.workshare"; + if (getNestedWrapper()) + return emitError() << "cannot be composite"; return success(); } diff --git a/mlir/test/Dialect/OpenMP/invalid.mlir b/mlir/test/Dialect/OpenMP/invalid.mlir index e8482574408c0..d56629e76b09c 100644 --- a/mlir/test/Dialect/OpenMP/invalid.mlir +++ b/mlir/test/Dialect/OpenMP/invalid.mlir @@ -2581,15 +2581,13 @@ func.func @omp_taskloop_invalid_composite(%lb: index, %ub: index, %step: index) // ----- func.func @nested_wrapper(%idx : index) { omp.workshare { - // expected-error @below {{nested wrappers not supported}} + // expected-error @below {{cannot be composite}} omp.workshare.loop_wrapper { omp.simd { omp.loop_nest (%iv) : index = (%idx) to (%idx) step (%idx) { omp.yield } - omp.terminator - } - omp.terminator + } {omp.composite} } omp.terminator } @@ -2599,9 +2597,9 @@ func.func @nested_wrapper(%idx : index) { // ----- func.func @not_wrapper() { omp.workshare { - // expected-error @below {{must be a loop wrapper}} + // expected-error @below {{op nested in loop wrapper is not another loop wrapper or `omp.loop_nest`}} omp.workshare.loop_wrapper { - omp.terminator + %0 = arith.constant 0 : index } omp.terminator } @@ -2615,7 +2613,6 @@ func.func @missing_workshare(%idx : index) { omp.loop_nest (%iv) : index = (%idx) to (%idx) step (%idx) { omp.yield } - omp.terminator } return } From bf363883787e9b4989dd858f8573579688f7044b Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Wed, 31 Jul 2024 14:11:47 +0900 Subject: [PATCH 05/34] [flang][omp] Emit omp.workshare in frontend Fix lower test for workshare --- flang/lib/Lower/OpenMP/OpenMP.cpp | 30 +++++++++++++++++++++++---- flang/test/Lower/OpenMP/workshare.f90 | 6 +++--- 2 files changed, 29 insertions(+), 7 deletions(-) diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp index cf469003b7298..22f6d5bd09cd6 100644 --- a/flang/lib/Lower/OpenMP/OpenMP.cpp +++ b/flang/lib/Lower/OpenMP/OpenMP.cpp @@ -1330,6 +1330,15 @@ static void genTaskwaitClauses(lower::AbstractConverter &converter, loc, llvm::omp::Directive::OMPD_taskwait); } +static void genWorkshareClauses(lower::AbstractConverter &converter, + semantics::SemanticsContext &semaCtx, + lower::StatementContext &stmtCtx, + const List &clauses, mlir::Location loc, + mlir::omp::WorkshareOperands &clauseOps) { + ClauseProcessor cp(converter, semaCtx, clauses); + cp.processNowait(clauseOps); +} + static void genTeamsClauses(lower::AbstractConverter &converter, semantics::SemanticsContext &semaCtx, lower::StatementContext &stmtCtx, @@ -1923,6 +1932,22 @@ genTaskyieldOp(lower::AbstractConverter &converter, lower::SymMap &symTable, return converter.getFirOpBuilder().create(loc); } +static mlir::omp::WorkshareOp +genWorkshareOp(lower::AbstractConverter &converter, lower::SymMap &symTable, + semantics::SemanticsContext &semaCtx, lower::pft::Evaluation &eval, + mlir::Location loc, const ConstructQueue &queue, + ConstructQueue::iterator item) { + lower::StatementContext stmtCtx; + mlir::omp::WorkshareOperands clauseOps; + 
genWorkshareClauses(converter, semaCtx, stmtCtx, item->clauses, loc, clauseOps); + + return genOpWithBody( + OpWithBodyGenInfo(converter, symTable, semaCtx, loc, eval, + llvm::omp::Directive::OMPD_workshare) + .setClauses(&item->clauses), + queue, item, clauseOps); +} + static mlir::omp::TeamsOp genTeamsOp(lower::AbstractConverter &converter, lower::SymMap &symTable, semantics::SemanticsContext &semaCtx, lower::pft::Evaluation &eval, @@ -2515,10 +2540,7 @@ static void genOMPDispatch(lower::AbstractConverter &converter, llvm::omp::getOpenMPDirectiveName(dir) + ")"); // case llvm::omp::Directive::OMPD_workdistribute: case llvm::omp::Directive::OMPD_workshare: - // FIXME: Workshare is not a commonly used OpenMP construct, an - // implementation for this feature will come later. For the codes - // that use this construct, add a single construct for now. - genSingleOp(converter, symTable, semaCtx, eval, loc, queue, item); + genWorkshareOp(converter, symTable, semaCtx, eval, loc, queue, item); break; default: // Combined and composite constructs should have been split into a sequence diff --git a/flang/test/Lower/OpenMP/workshare.f90 b/flang/test/Lower/OpenMP/workshare.f90 index 1e11677a15e1f..8e771952f5b6d 100644 --- a/flang/test/Lower/OpenMP/workshare.f90 +++ b/flang/test/Lower/OpenMP/workshare.f90 @@ -6,7 +6,7 @@ subroutine sb1(arr) integer :: arr(:) !CHECK: omp.parallel { !$omp parallel -!CHECK: omp.single { +!CHECK: omp.workshare { !$omp workshare arr = 0 !$omp end workshare @@ -20,7 +20,7 @@ subroutine sb2(arr) integer :: arr(:) !CHECK: omp.parallel { !$omp parallel -!CHECK: omp.single nowait { +!CHECK: omp.workshare nowait { !$omp workshare arr = 0 !$omp end workshare nowait @@ -33,7 +33,7 @@ subroutine sb2(arr) subroutine sb3(arr) integer :: arr(:) !CHECK: omp.parallel { -!CHECK: omp.single { +!CHECK: omp.workshare { !$omp parallel workshare arr = 0 !$omp end parallel workshare From e23cf320ed37cb73971bed74cf260e524210a187 Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Thu, 22 Aug 2024 17:01:43 +0900 Subject: [PATCH 06/34] Fix function signature --- flang/lib/Lower/OpenMP/OpenMP.cpp | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp index 22f6d5bd09cd6..daeb928e53d06 100644 --- a/flang/lib/Lower/OpenMP/OpenMP.cpp +++ b/flang/lib/Lower/OpenMP/OpenMP.cpp @@ -1934,12 +1934,14 @@ genTaskyieldOp(lower::AbstractConverter &converter, lower::SymMap &symTable, static mlir::omp::WorkshareOp genWorkshareOp(lower::AbstractConverter &converter, lower::SymMap &symTable, - semantics::SemanticsContext &semaCtx, lower::pft::Evaluation &eval, - mlir::Location loc, const ConstructQueue &queue, - ConstructQueue::iterator item) { + semantics::SemanticsContext &semaCtx, + lower::pft::Evaluation &eval, mlir::Location loc, + const ConstructQueue &queue, + ConstructQueue::const_iterator item) { lower::StatementContext stmtCtx; mlir::omp::WorkshareOperands clauseOps; - genWorkshareClauses(converter, semaCtx, stmtCtx, item->clauses, loc, clauseOps); + genWorkshareClauses(converter, semaCtx, stmtCtx, item->clauses, loc, + clauseOps); return genOpWithBody( OpWithBodyGenInfo(converter, symTable, semaCtx, loc, eval, From 6f114e0501f1759eab34dc8ddfc3030c03037cd4 Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Thu, 22 Aug 2024 18:07:05 +0900 Subject: [PATCH 07/34] [flang] Introduce ws loop nest generation for HLFIR lowering Emit loop nests in a custom wrapper Only emit unordered loops as omp loops Fix 
uninitialized memory bug in genLoopNest --- .../flang/Optimizer/Builder/HLFIRTools.h | 12 +++-- flang/lib/Lower/ConvertCall.cpp | 2 +- flang/lib/Lower/OpenMP/ReductionProcessor.cpp | 4 +- flang/lib/Optimizer/Builder/HLFIRTools.cpp | 52 ++++++++++++++----- .../HLFIR/Transforms/BufferizeHLFIR.cpp | 3 +- .../LowerHLFIROrderedAssignments.cpp | 33 ++++++------ .../Transforms/OptimizedBufferization.cpp | 6 +-- 7 files changed, 69 insertions(+), 43 deletions(-) diff --git a/flang/include/flang/Optimizer/Builder/HLFIRTools.h b/flang/include/flang/Optimizer/Builder/HLFIRTools.h index 6b41025eea078..f073f494b3fb2 100644 --- a/flang/include/flang/Optimizer/Builder/HLFIRTools.h +++ b/flang/include/flang/Optimizer/Builder/HLFIRTools.h @@ -357,8 +357,8 @@ hlfir::ElementalOp genElementalOp( /// Structure to describe a loop nest. struct LoopNest { - fir::DoLoopOp outerLoop; - fir::DoLoopOp innerLoop; + mlir::Operation *outerOp = nullptr; + mlir::Block *body = nullptr; llvm::SmallVector oneBasedIndices; }; @@ -366,11 +366,13 @@ struct LoopNest { /// \p isUnordered specifies whether the loops in the loop nest /// are unordered. LoopNest genLoopNest(mlir::Location loc, fir::FirOpBuilder &builder, - mlir::ValueRange extents, bool isUnordered = false); + mlir::ValueRange extents, bool isUnordered = false, + bool emitWorkshareLoop = false); inline LoopNest genLoopNest(mlir::Location loc, fir::FirOpBuilder &builder, - mlir::Value shape, bool isUnordered = false) { + mlir::Value shape, bool isUnordered = false, + bool emitWorkshareLoop = false) { return genLoopNest(loc, builder, getIndexExtents(loc, builder, shape), - isUnordered); + isUnordered, emitWorkshareLoop); } /// Inline the body of an hlfir.elemental at the current insertion point diff --git a/flang/lib/Lower/ConvertCall.cpp b/flang/lib/Lower/ConvertCall.cpp index 9f5b58590fb79..e84e7afbe82e0 100644 --- a/flang/lib/Lower/ConvertCall.cpp +++ b/flang/lib/Lower/ConvertCall.cpp @@ -2135,7 +2135,7 @@ class ElementalCallBuilder { hlfir::genLoopNest(loc, builder, shape, !mustBeOrdered); mlir::ValueRange oneBasedIndices = loopNest.oneBasedIndices; auto insPt = builder.saveInsertionPoint(); - builder.setInsertionPointToStart(loopNest.innerLoop.getBody()); + builder.setInsertionPointToStart(loopNest.body); callContext.stmtCtx.pushScope(); for (auto &preparedActual : loweredActuals) if (preparedActual) diff --git a/flang/lib/Lower/OpenMP/ReductionProcessor.cpp b/flang/lib/Lower/OpenMP/ReductionProcessor.cpp index 6b98ea3d0615b..736de2ee511be 100644 --- a/flang/lib/Lower/OpenMP/ReductionProcessor.cpp +++ b/flang/lib/Lower/OpenMP/ReductionProcessor.cpp @@ -374,7 +374,7 @@ static void genBoxCombiner(fir::FirOpBuilder &builder, mlir::Location loc, // know this won't miss any opportuinties for clever elemental inlining hlfir::LoopNest nest = hlfir::genLoopNest( loc, builder, shapeShift.getExtents(), /*isUnordered=*/true); - builder.setInsertionPointToStart(nest.innerLoop.getBody()); + builder.setInsertionPointToStart(nest.body); mlir::Type refTy = fir::ReferenceType::get(seqTy.getEleTy()); auto lhsEleAddr = builder.create( loc, refTy, lhs, shapeShift, /*slice=*/mlir::Value{}, @@ -388,7 +388,7 @@ static void genBoxCombiner(fir::FirOpBuilder &builder, mlir::Location loc, builder, loc, redId, refTy, lhsEle, rhsEle); builder.create(loc, scalarReduction, lhsEleAddr); - builder.setInsertionPointAfter(nest.outerLoop); + builder.setInsertionPointAfter(nest.outerOp); builder.create(loc, lhsAddr); } diff --git a/flang/lib/Optimizer/Builder/HLFIRTools.cpp 
b/flang/lib/Optimizer/Builder/HLFIRTools.cpp index 8d0ae2f195178..333331378841e 100644 --- a/flang/lib/Optimizer/Builder/HLFIRTools.cpp +++ b/flang/lib/Optimizer/Builder/HLFIRTools.cpp @@ -20,6 +20,7 @@ #include "mlir/IR/IRMapping.h" #include "mlir/Support/LLVM.h" #include "llvm/ADT/TypeSwitch.h" +#include #include // Return explicit extents. If the base is a fir.box, this won't read it to @@ -855,26 +856,51 @@ mlir::Value hlfir::inlineElementalOp( hlfir::LoopNest hlfir::genLoopNest(mlir::Location loc, fir::FirOpBuilder &builder, - mlir::ValueRange extents, bool isUnordered) { + mlir::ValueRange extents, bool isUnordered, + bool emitWorkshareLoop) { + emitWorkshareLoop = emitWorkshareLoop && isUnordered; hlfir::LoopNest loopNest; assert(!extents.empty() && "must have at least one extent"); - auto insPt = builder.saveInsertionPoint(); + mlir::OpBuilder::InsertionGuard guard(builder); loopNest.oneBasedIndices.assign(extents.size(), mlir::Value{}); // Build loop nest from column to row. auto one = builder.create(loc, 1); mlir::Type indexType = builder.getIndexType(); - unsigned dim = extents.size() - 1; - for (auto extent : llvm::reverse(extents)) { - auto ub = builder.createConvert(loc, indexType, extent); - loopNest.innerLoop = - builder.create(loc, one, ub, one, isUnordered); - builder.setInsertionPointToStart(loopNest.innerLoop.getBody()); - // Reverse the indices so they are in column-major order. - loopNest.oneBasedIndices[dim--] = loopNest.innerLoop.getInductionVar(); - if (!loopNest.outerLoop) - loopNest.outerLoop = loopNest.innerLoop; + if (emitWorkshareLoop) { + auto wslw = builder.create(loc); + loopNest.outerOp = wslw; + builder.createBlock(&wslw.getRegion()); + mlir::omp::LoopNestOperands lnops; + lnops.loopInclusive = builder.getUnitAttr(); + for (auto extent : llvm::reverse(extents)) { + lnops.loopLowerBounds.push_back(one); + lnops.loopUpperBounds.push_back(extent); + lnops.loopSteps.push_back(one); + } + auto lnOp = builder.create(loc, lnops); + builder.create(loc); + mlir::Block *block = builder.createBlock(&lnOp.getRegion()); + for (auto extent : llvm::reverse(extents)) + block->addArgument(extent.getType(), extent.getLoc()); + loopNest.body = block; + builder.create(loc); + for (unsigned dim = 0; dim < extents.size(); dim++) + loopNest.oneBasedIndices[extents.size() - dim - 1] = + lnOp.getRegion().front().getArgument(dim); + } else { + unsigned dim = extents.size() - 1; + for (auto extent : llvm::reverse(extents)) { + auto ub = builder.createConvert(loc, indexType, extent); + auto doLoop = + builder.create(loc, one, ub, one, isUnordered); + loopNest.body = doLoop.getBody(); + builder.setInsertionPointToStart(loopNest.body); + // Reverse the indices so they are in column-major order. 
+ loopNest.oneBasedIndices[dim--] = doLoop.getInductionVar(); + if (!loopNest.outerOp) + loopNest.outerOp = doLoop; + } } - builder.restoreInsertionPoint(insPt); return loopNest; } diff --git a/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp b/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp index a70a6b388c4b1..07794828fce26 100644 --- a/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp +++ b/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp @@ -26,6 +26,7 @@ #include "flang/Optimizer/HLFIR/HLFIRDialect.h" #include "flang/Optimizer/HLFIR/HLFIROps.h" #include "flang/Optimizer/HLFIR/Passes.h" +#include "mlir/Dialect/OpenMP/OpenMPDialect.h" #include "mlir/IR/Dominance.h" #include "mlir/IR/PatternMatch.h" #include "mlir/Pass/Pass.h" @@ -793,7 +794,7 @@ struct ElementalOpConversion hlfir::LoopNest loopNest = hlfir::genLoopNest(loc, builder, extents, !elemental.isOrdered()); auto insPt = builder.saveInsertionPoint(); - builder.setInsertionPointToStart(loopNest.innerLoop.getBody()); + builder.setInsertionPointToStart(loopNest.body); auto yield = hlfir::inlineElementalOp(loc, builder, elemental, loopNest.oneBasedIndices); hlfir::Entity elementValue(yield.getElementValue()); diff --git a/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp b/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp index 85dd517cb5791..424566462e8fe 100644 --- a/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp +++ b/flang/lib/Optimizer/HLFIR/Transforms/LowerHLFIROrderedAssignments.cpp @@ -464,7 +464,7 @@ void OrderedAssignmentRewriter::pre(hlfir::RegionAssignOp regionAssignOp) { // if the LHS is not). mlir::Value shape = hlfir::genShape(loc, builder, lhsEntity); elementalLoopNest = hlfir::genLoopNest(loc, builder, shape); - builder.setInsertionPointToStart(elementalLoopNest->innerLoop.getBody()); + builder.setInsertionPointToStart(elementalLoopNest->body); lhsEntity = hlfir::getElementAt(loc, builder, lhsEntity, elementalLoopNest->oneBasedIndices); rhsEntity = hlfir::getElementAt(loc, builder, rhsEntity, @@ -484,7 +484,7 @@ void OrderedAssignmentRewriter::pre(hlfir::RegionAssignOp regionAssignOp) { for (auto &cleanupConversion : argConversionCleanups) cleanupConversion(); if (elementalLoopNest) - builder.setInsertionPointAfter(elementalLoopNest->outerLoop); + builder.setInsertionPointAfter(elementalLoopNest->outerOp); } else { // TODO: preserve allocatable assignment aspects for forall once // they are conveyed in hlfir.region_assign. 
@@ -492,8 +492,7 @@ void OrderedAssignmentRewriter::pre(hlfir::RegionAssignOp regionAssignOp) { } generateCleanupIfAny(loweredLhs.elementalCleanup); if (loweredLhs.vectorSubscriptLoopNest) - builder.setInsertionPointAfter( - loweredLhs.vectorSubscriptLoopNest->outerLoop); + builder.setInsertionPointAfter(loweredLhs.vectorSubscriptLoopNest->outerOp); generateCleanupIfAny(oldRhsYield); generateCleanupIfAny(loweredLhs.nonElementalCleanup); } @@ -518,8 +517,8 @@ void OrderedAssignmentRewriter::pre(hlfir::WhereOp whereOp) { hlfir::Entity savedMask{maybeSaved->first}; mlir::Value shape = hlfir::genShape(loc, builder, savedMask); whereLoopNest = hlfir::genLoopNest(loc, builder, shape); - constructStack.push_back(whereLoopNest->outerLoop.getOperation()); - builder.setInsertionPointToStart(whereLoopNest->innerLoop.getBody()); + constructStack.push_back(whereLoopNest->outerOp); + builder.setInsertionPointToStart(whereLoopNest->body); mlir::Value cdt = hlfir::getElementAt(loc, builder, savedMask, whereLoopNest->oneBasedIndices); generateMaskIfOp(cdt); @@ -527,7 +526,7 @@ void OrderedAssignmentRewriter::pre(hlfir::WhereOp whereOp) { // If this is the same run as the one that saved the value, the clean-up // was left-over to be done now. auto insertionPoint = builder.saveInsertionPoint(); - builder.setInsertionPointAfter(whereLoopNest->outerLoop); + builder.setInsertionPointAfter(whereLoopNest->outerOp); generateCleanupIfAny(maybeSaved->second); builder.restoreInsertionPoint(insertionPoint); } @@ -539,8 +538,8 @@ void OrderedAssignmentRewriter::pre(hlfir::WhereOp whereOp) { mask.generateNoneElementalPart(builder, mapper); mlir::Value shape = mask.generateShape(builder, mapper); whereLoopNest = hlfir::genLoopNest(loc, builder, shape); - constructStack.push_back(whereLoopNest->outerLoop.getOperation()); - builder.setInsertionPointToStart(whereLoopNest->innerLoop.getBody()); + constructStack.push_back(whereLoopNest->outerOp); + builder.setInsertionPointToStart(whereLoopNest->body); mlir::Value cdt = generateMaskedEntity(mask); generateMaskIfOp(cdt); return; @@ -754,7 +753,7 @@ OrderedAssignmentRewriter::generateYieldedLHS( loweredLhs.vectorSubscriptLoopNest = hlfir::genLoopNest( loc, builder, loweredLhs.vectorSubscriptShape.value()); builder.setInsertionPointToStart( - loweredLhs.vectorSubscriptLoopNest->innerLoop.getBody()); + loweredLhs.vectorSubscriptLoopNest->body); } loweredLhs.lhs = temp->second.fetch(loc, builder); return loweredLhs; @@ -771,8 +770,7 @@ OrderedAssignmentRewriter::generateYieldedLHS( loweredLhs.vectorSubscriptLoopNest = hlfir::genLoopNest(loc, builder, *loweredLhs.vectorSubscriptShape, !elementalAddrLhs.isOrdered()); - builder.setInsertionPointToStart( - loweredLhs.vectorSubscriptLoopNest->innerLoop.getBody()); + builder.setInsertionPointToStart(loweredLhs.vectorSubscriptLoopNest->body); mapper.map(elementalAddrLhs.getIndices(), loweredLhs.vectorSubscriptLoopNest->oneBasedIndices); for (auto &op : elementalAddrLhs.getBody().front().without_terminator()) @@ -798,11 +796,11 @@ OrderedAssignmentRewriter::generateMaskedEntity(MaskedArrayExpr &maskedExpr) { if (!maskedExpr.noneElementalPartWasGenerated) { // Generate none elemental part before the where loops (but inside the // current forall loops if any). - builder.setInsertionPoint(whereLoopNest->outerLoop); + builder.setInsertionPoint(whereLoopNest->outerOp); maskedExpr.generateNoneElementalPart(builder, mapper); } // Generate the none elemental part cleanup after the where loops. 
- builder.setInsertionPointAfter(whereLoopNest->outerLoop); + builder.setInsertionPointAfter(whereLoopNest->outerOp); maskedExpr.generateNoneElementalCleanupIfAny(builder, mapper); // Generate the value of the current element for the masked expression // at the current insertion point (inside the where loops, and any fir.if @@ -1242,7 +1240,7 @@ void OrderedAssignmentRewriter::saveLeftHandSide( LhsValueAndCleanUp loweredLhs = generateYieldedLHS(loc, region); fir::factory::TemporaryStorage *temp = nullptr; if (loweredLhs.vectorSubscriptLoopNest) - constructStack.push_back(loweredLhs.vectorSubscriptLoopNest->outerLoop); + constructStack.push_back(loweredLhs.vectorSubscriptLoopNest->outerOp); if (loweredLhs.vectorSubscriptLoopNest && !rhsIsArray(regionAssignOp)) { // Vector subscripted entity for which the shape must also be saved on top // of the element addresses (e.g. the shape may change in each forall @@ -1265,7 +1263,7 @@ void OrderedAssignmentRewriter::saveLeftHandSide( // subscripted LHS. auto &vectorTmp = temp->cast(); auto insertionPoint = builder.saveInsertionPoint(); - builder.setInsertionPoint(loweredLhs.vectorSubscriptLoopNest->outerLoop); + builder.setInsertionPoint(loweredLhs.vectorSubscriptLoopNest->outerOp); vectorTmp.pushShape(loc, builder, shape); builder.restoreInsertionPoint(insertionPoint); } else { @@ -1290,8 +1288,7 @@ void OrderedAssignmentRewriter::saveLeftHandSide( generateCleanupIfAny(loweredLhs.elementalCleanup); if (loweredLhs.vectorSubscriptLoopNest) { constructStack.pop_back(); - builder.setInsertionPointAfter( - loweredLhs.vectorSubscriptLoopNest->outerLoop); + builder.setInsertionPointAfter(loweredLhs.vectorSubscriptLoopNest->outerOp); } } diff --git a/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp b/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp index 7553e05b47063..3a0a98dc59446 100644 --- a/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp +++ b/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp @@ -483,7 +483,7 @@ llvm::LogicalResult ElementalAssignBufferization::matchAndRewrite( // hlfir.elemental region inside the inner loop hlfir::LoopNest loopNest = hlfir::genLoopNest(loc, builder, extents, !elemental.isOrdered()); - builder.setInsertionPointToStart(loopNest.innerLoop.getBody()); + builder.setInsertionPointToStart(loopNest.body); auto yield = hlfir::inlineElementalOp(loc, builder, elemental, loopNest.oneBasedIndices); hlfir::Entity elementValue{yield.getElementValue()}; @@ -554,7 +554,7 @@ llvm::LogicalResult BroadcastAssignBufferization::matchAndRewrite( hlfir::getIndexExtents(loc, builder, shape); hlfir::LoopNest loopNest = hlfir::genLoopNest(loc, builder, extents, /*isUnordered=*/true); - builder.setInsertionPointToStart(loopNest.innerLoop.getBody()); + builder.setInsertionPointToStart(loopNest.body); auto arrayElement = hlfir::getElementAt(loc, builder, lhs, loopNest.oneBasedIndices); builder.create(loc, rhs, arrayElement); @@ -649,7 +649,7 @@ llvm::LogicalResult VariableAssignBufferization::matchAndRewrite( hlfir::getIndexExtents(loc, builder, shape); hlfir::LoopNest loopNest = hlfir::genLoopNest(loc, builder, extents, /*isUnordered=*/true); - builder.setInsertionPointToStart(loopNest.innerLoop.getBody()); + builder.setInsertionPointToStart(loopNest.body); auto rhsArrayElement = hlfir::getElementAt(loc, builder, rhs, loopNest.oneBasedIndices); rhsArrayElement = hlfir::loadTrivialScalar(loc, builder, rhsArrayElement); From d8cfd384bb47d38c09ed4e1a1cd598b891d0915b Mon Sep 17 00:00:00 
2001 From: Ivan Radanov Ivanov Date: Sun, 20 Oct 2024 01:35:01 +0900 Subject: [PATCH 08/34] genLoopNest fix --- flang/lib/Optimizer/Builder/HLFIRTools.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/flang/lib/Optimizer/Builder/HLFIRTools.cpp b/flang/lib/Optimizer/Builder/HLFIRTools.cpp index 333331378841e..7425ccf7fc0e3 100644 --- a/flang/lib/Optimizer/Builder/HLFIRTools.cpp +++ b/flang/lib/Optimizer/Builder/HLFIRTools.cpp @@ -878,7 +878,6 @@ hlfir::LoopNest hlfir::genLoopNest(mlir::Location loc, lnops.loopSteps.push_back(one); } auto lnOp = builder.create(loc, lnops); - builder.create(loc); mlir::Block *block = builder.createBlock(&lnOp.getRegion()); for (auto extent : llvm::reverse(extents)) block->addArgument(extent.getType(), extent.getLoc()); From 568abc28ee32e4ee05190d32ef86ff73215dbaa8 Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Sun, 4 Aug 2024 22:06:55 +0900 Subject: [PATCH 09/34] [flang] Lower omp.workshare to other omp constructs Change to workshare loop wrapper op Move single op declaration Schedule pass properly Correctly handle nested nested loop nests to be parallelized by workshare Leave comments for shouldUseWorkshareLowering Use copyprivate to scatter val from omp.single TODO still need to implement copy function TODO transitive check for usage outside of omp.single not imiplemented yet Transitively check for users outisde of single op TODO need to implement copy func TODO need to hoist allocas outside of single regions Add tests Hoist allocas More tests Emit body for copy func Test the tmp storing logic Clean up trivially dead ops Only handle single-block regions for now Fix tests for custom assembly for loop wrapper Only run the lower workshare pass if openmp is enabled Implement some missing functionality Fix tests Fix test Iterate backwards to find all trivially dead ops Add expalanation comment for createCopyFun Update test --- flang/include/flang/Optimizer/OpenMP/Passes.h | 5 + .../include/flang/Optimizer/OpenMP/Passes.td | 5 + flang/include/flang/Tools/CrossToolHelpers.h | 1 + flang/lib/Frontend/FrontendActions.cpp | 10 +- flang/lib/Optimizer/OpenMP/CMakeLists.txt | 1 + flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 446 ++++++++++++++++++ flang/lib/Optimizer/Passes/Pipelines.cpp | 6 +- flang/test/Fir/basic-program.fir | 1 + .../Transforms/OpenMP/lower-workshare.mlir | 189 ++++++++ .../Transforms/OpenMP/lower-workshare2.mlir | 23 + .../Transforms/OpenMP/lower-workshare3.mlir | 74 +++ .../Transforms/OpenMP/lower-workshare4.mlir | 59 +++ .../Transforms/OpenMP/lower-workshare5.mlir | 42 ++ .../Transforms/OpenMP/lower-workshare6.mlir | 51 ++ flang/tools/bbc/bbc.cpp | 5 +- flang/tools/tco/tco.cpp | 1 + 16 files changed, 915 insertions(+), 4 deletions(-) create mode 100644 flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp create mode 100644 flang/test/Transforms/OpenMP/lower-workshare.mlir create mode 100644 flang/test/Transforms/OpenMP/lower-workshare2.mlir create mode 100644 flang/test/Transforms/OpenMP/lower-workshare3.mlir create mode 100644 flang/test/Transforms/OpenMP/lower-workshare4.mlir create mode 100644 flang/test/Transforms/OpenMP/lower-workshare5.mlir create mode 100644 flang/test/Transforms/OpenMP/lower-workshare6.mlir diff --git a/flang/include/flang/Optimizer/OpenMP/Passes.h b/flang/include/flang/Optimizer/OpenMP/Passes.h index 403d79667bf44..feb395f1a12db 100644 --- a/flang/include/flang/Optimizer/OpenMP/Passes.h +++ b/flang/include/flang/Optimizer/OpenMP/Passes.h @@ -25,6 +25,11 @@ namespace flangomp { #define GEN_PASS_REGISTRATION #include 
"flang/Optimizer/OpenMP/Passes.h.inc" +/// Impelements the logic specified in the 2.8.3 workshare Construct section of +/// the OpenMP standard which specifies what statements or constructs shall be +/// divided into units of work. +bool shouldUseWorkshareLowering(mlir::Operation *op); + } // namespace flangomp #endif // FORTRAN_OPTIMIZER_OPENMP_PASSES_H diff --git a/flang/include/flang/Optimizer/OpenMP/Passes.td b/flang/include/flang/Optimizer/OpenMP/Passes.td index 1c0ce08f5b483..dc1956bea9fb2 100644 --- a/flang/include/flang/Optimizer/OpenMP/Passes.td +++ b/flang/include/flang/Optimizer/OpenMP/Passes.td @@ -37,4 +37,9 @@ def FunctionFilteringPass : Pass<"omp-function-filtering"> { ]; } +// Needs to be scheduled on Module as we create functions in it +def LowerWorkshare : Pass<"lower-workshare", "::mlir::ModuleOp"> { + let summary = "Lower workshare construct"; +} + #endif //FORTRAN_OPTIMIZER_OPENMP_PASSES diff --git a/flang/include/flang/Tools/CrossToolHelpers.h b/flang/include/flang/Tools/CrossToolHelpers.h index df4b21ada058f..d936b739e5815 100644 --- a/flang/include/flang/Tools/CrossToolHelpers.h +++ b/flang/include/flang/Tools/CrossToolHelpers.h @@ -123,6 +123,7 @@ struct MLIRToLLVMPassPipelineConfig : public FlangEPCallBacks { false; ///< Set no-signed-zeros-fp-math attribute for functions. bool UnsafeFPMath = false; ///< Set unsafe-fp-math attribute for functions. bool NSWOnLoopVarInc = false; ///< Add nsw flag to loop variable increments. + bool EnableOpenMP = false; ///< Enable OpenMP lowering. }; struct OffloadModuleOpts { diff --git a/flang/lib/Frontend/FrontendActions.cpp b/flang/lib/Frontend/FrontendActions.cpp index f2e460fc53a67..8c21fe18e67b4 100644 --- a/flang/lib/Frontend/FrontendActions.cpp +++ b/flang/lib/Frontend/FrontendActions.cpp @@ -715,7 +715,11 @@ void CodeGenAction::lowerHLFIRToFIR() { pm.enableVerifier(/*verifyPasses=*/true); // Create the pass pipeline - fir::createHLFIRToFIRPassPipeline(pm, level); + fir::createHLFIRToFIRPassPipeline( + pm, + ci.getInvocation().getFrontendOpts().features.IsEnabled( + Fortran::common::LanguageFeature::OpenMP), + level); (void)mlir::applyPassManagerCLOptions(pm); if (!mlir::succeeded(pm.run(*mlirModule))) { @@ -828,6 +832,10 @@ void CodeGenAction::generateLLVMIR() { config.VScaleMax = vsr->second; } + if (ci.getInvocation().getFrontendOpts().features.IsEnabled( + Fortran::common::LanguageFeature::OpenMP)) + config.EnableOpenMP = true; + if (ci.getInvocation().getLoweringOpts().getNSWOnLoopVarInc()) config.NSWOnLoopVarInc = true; diff --git a/flang/lib/Optimizer/OpenMP/CMakeLists.txt b/flang/lib/Optimizer/OpenMP/CMakeLists.txt index 92051634f0378..39e92d388288d 100644 --- a/flang/lib/Optimizer/OpenMP/CMakeLists.txt +++ b/flang/lib/Optimizer/OpenMP/CMakeLists.txt @@ -4,6 +4,7 @@ add_flang_library(FlangOpenMPTransforms FunctionFiltering.cpp MapInfoFinalization.cpp MarkDeclareTarget.cpp + LowerWorkshare.cpp DEPENDS FIRDialect diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp new file mode 100644 index 0000000000000..6e5538b54ba5e --- /dev/null +++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp @@ -0,0 +1,446 @@ +//===- LowerWorkshare.cpp - special cases for bufferization -------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file implements the lowering of omp.workshare to other omp constructs. +// +// This pass is tasked with parallelizing the loops nested in +// workshare.loop_wrapper while both the Fortran to mlir lowering and the hlfir +// to fir lowering pipelines are responsible for emitting the +// workshare.loop_wrapper ops where appropriate according to the +// `shouldUseWorkshareLowering` function. +// +//===----------------------------------------------------------------------===// + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace flangomp { +#define GEN_PASS_DEF_LOWERWORKSHARE +#include "flang/Optimizer/OpenMP/Passes.h.inc" +} // namespace flangomp + +#define DEBUG_TYPE "lower-workshare" + +using namespace mlir; + +namespace flangomp { + +// Checks for nesting pattern below as we need to avoid sharing the work of +// statements which are nested in some constructs such as omp.critical or +// another omp.parallel. +// +// omp.workshare { // `wsOp` +// ... +// omp.T { // `parent` +// ... +// `op` +// +template +static bool isNestedIn(omp::WorkshareOp wsOp, Operation *op) { + T parent = op->getParentOfType(); + if (!parent) + return false; + return wsOp->isProperAncestor(parent); +} + +bool shouldUseWorkshareLowering(Operation *op) { + auto parentWorkshare = op->getParentOfType(); + + if (!parentWorkshare) + return false; + + if (isNestedIn(parentWorkshare, op)) + return false; + + // 2.8.3 workshare Construct + // For a parallel construct, the construct is a unit of work with respect to + // the workshare construct. The statements contained in the parallel construct + // are executed by a new thread team. + if (isNestedIn(parentWorkshare, op)) + return false; + + // 2.8.2 single Construct + // Binding The binding thread set for a single region is the current team. A + // single region binds to the innermost enclosing parallel region. + // Description Only one of the encountering threads will execute the + // structured block associated with the single construct. + if (isNestedIn(parentWorkshare, op)) + return false; + + return true; +} + +} // namespace flangomp + +namespace { + +struct SingleRegion { + Block::iterator begin, end; +}; + +static bool mustParallelizeOp(Operation *op) { + return op + ->walk([&](Operation *nested) { + // We need to be careful not to pick up workshare.loop_wrapper in nested + // omp.parallel{omp.workshare} regions, i.e. make sure that `nested` + // binds to the workshare region we are currently handling. + // + // For example: + // + // omp.parallel { + // omp.workshare { // currently handling this + // omp.parallel { + // omp.workshare { // nested workshare + // omp.workshare.loop_wrapper {} + // + // Therefore, we skip if we encounter a nested omp.workshare. 
+ if (isa(op)) + return WalkResult::skip(); + if (isa(op)) + return WalkResult::interrupt(); + return WalkResult::advance(); + }) + .wasInterrupted(); +} + +static bool isSafeToParallelize(Operation *op) { + return isa(op) || isa(op) || + isMemoryEffectFree(op); +} + +/// Simple shallow copies suffice for our purposes in this pass, so we implement +/// this simpler alternative to the full fledged `createCopyFunc` in the +/// frontend +static mlir::func::FuncOp createCopyFunc(mlir::Location loc, mlir::Type varType, + fir::FirOpBuilder builder) { + mlir::ModuleOp module = builder.getModule(); + auto rt = cast(varType); + mlir::Type eleTy = rt.getEleTy(); + std::string copyFuncName = + fir::getTypeAsString(eleTy, builder.getKindMap(), "_workshare_copy"); + + if (auto decl = module.lookupSymbol(copyFuncName)) + return decl; + // create function + mlir::OpBuilder::InsertionGuard guard(builder); + mlir::OpBuilder modBuilder(module.getBodyRegion()); + llvm::SmallVector argsTy = {varType, varType}; + auto funcType = mlir::FunctionType::get(builder.getContext(), argsTy, {}); + mlir::func::FuncOp funcOp = + modBuilder.create(loc, copyFuncName, funcType); + funcOp.setVisibility(mlir::SymbolTable::Visibility::Private); + builder.createBlock(&funcOp.getRegion(), funcOp.getRegion().end(), argsTy, + {loc, loc}); + builder.setInsertionPointToStart(&funcOp.getRegion().back()); + + Value loaded = builder.create(loc, funcOp.getArgument(0)); + builder.create(loc, loaded, funcOp.getArgument(1)); + + builder.create(loc); + return funcOp; +} + +static bool isUserOutsideSR(Operation *user, Operation *parentOp, + SingleRegion sr) { + while (user->getParentOp() != parentOp) + user = user->getParentOp(); + return sr.begin->getBlock() != user->getBlock() || + !(user->isBeforeInBlock(&*sr.end) && sr.begin->isBeforeInBlock(user)); +} + +static bool isTransitivelyUsedOutside(Value v, SingleRegion sr) { + Block *srBlock = sr.begin->getBlock(); + Operation *parentOp = srBlock->getParentOp(); + + for (auto &use : v.getUses()) { + Operation *user = use.getOwner(); + if (isUserOutsideSR(user, parentOp, sr)) + return true; + + // Results of nested users cannot be used outside of the SR + if (user->getBlock() != srBlock) + continue; + + // A non-safe to parallelize operation will be handled separately + if (!isSafeToParallelize(user)) + continue; + + for (auto res : user->getResults()) + if (isTransitivelyUsedOutside(res, sr)) + return true; + } + return false; +} + +/// We clone pure operations in both the parallel and single blocks. 
this +/// functions cleans them up if they end up with no uses +static void cleanupBlock(Block *block) { + for (Operation &op : llvm::make_early_inc_range( + llvm::make_range(block->rbegin(), block->rend()))) + if (isOpTriviallyDead(&op)) + op.erase(); +} + +static void parallelizeRegion(Region &sourceRegion, Region &targetRegion, + IRMapping &rootMapping, Location loc, + mlir::DominanceInfo &di) { + OpBuilder rootBuilder(sourceRegion.getContext()); + ModuleOp m = sourceRegion.getParentOfType(); + OpBuilder copyFuncBuilder(m.getBodyRegion()); + fir::FirOpBuilder firCopyFuncBuilder(copyFuncBuilder, m); + + auto mapReloadedValue = + [&](Value v, OpBuilder allocaBuilder, OpBuilder singleBuilder, + OpBuilder parallelBuilder, IRMapping singleMapping) -> Value { + if (auto reloaded = rootMapping.lookupOrNull(v)) + return nullptr; + Type ty = v.getType(); + Value alloc = allocaBuilder.create(loc, ty); + singleBuilder.create(loc, singleMapping.lookup(v), alloc); + Value reloaded = parallelBuilder.create(loc, ty, alloc); + rootMapping.map(v, reloaded); + return alloc; + }; + + auto moveToSingle = [&](SingleRegion sr, OpBuilder allocaBuilder, + OpBuilder singleBuilder, + OpBuilder parallelBuilder) -> SmallVector { + IRMapping singleMapping = rootMapping; + SmallVector copyPrivate; + + for (Operation &op : llvm::make_range(sr.begin, sr.end)) { + if (isSafeToParallelize(&op)) { + singleBuilder.clone(op, singleMapping); + parallelBuilder.clone(op, rootMapping); + } else if (auto alloca = dyn_cast(&op)) { + auto hoisted = + cast(allocaBuilder.clone(*alloca, singleMapping)); + rootMapping.map(&*alloca, &*hoisted); + rootMapping.map(alloca.getResult(), hoisted.getResult()); + copyPrivate.push_back(hoisted); + } else { + singleBuilder.clone(op, singleMapping); + // Prepare reloaded values for results of operations that cannot be + // safely parallelized and which are used after the region `sr` + for (auto res : op.getResults()) { + if (isTransitivelyUsedOutside(res, sr)) { + auto alloc = mapReloadedValue(res, allocaBuilder, singleBuilder, + parallelBuilder, singleMapping); + if (alloc) + copyPrivate.push_back(alloc); + } + } + } + } + singleBuilder.create(loc); + return copyPrivate; + }; + + for (Block &block : sourceRegion) { + Block *targetBlock = rootBuilder.createBlock( + &targetRegion, {}, block.getArgumentTypes(), + llvm::map_to_vector(block.getArguments(), + [](BlockArgument arg) { return arg.getLoc(); })); + rootMapping.map(&block, targetBlock); + rootMapping.map(block.getArguments(), targetBlock->getArguments()); + } + + auto handleOneBlock = [&](Block &block) { + Block &targetBlock = *rootMapping.lookup(&block); + rootBuilder.setInsertionPointToStart(&targetBlock); + Operation *terminator = block.getTerminator(); + SmallVector> regions; + + auto it = block.begin(); + auto getOneRegion = [&]() { + if (&*it == terminator) + return false; + if (mustParallelizeOp(&*it)) { + regions.push_back(&*it); + it++; + return true; + } + SingleRegion sr; + sr.begin = it; + while (&*it != terminator && !mustParallelizeOp(&*it)) + it++; + sr.end = it; + assert(sr.begin != sr.end); + regions.push_back(sr); + return true; + }; + while (getOneRegion()) + ; + + for (auto [i, opOrSingle] : llvm::enumerate(regions)) { + bool isLast = i + 1 == regions.size(); + if (std::holds_alternative(opOrSingle)) { + OpBuilder singleBuilder(sourceRegion.getContext()); + Block *singleBlock = new Block(); + singleBuilder.setInsertionPointToStart(singleBlock); + + OpBuilder allocaBuilder(sourceRegion.getContext()); + Block *allocaBlock 
= new Block(); + allocaBuilder.setInsertionPointToStart(allocaBlock); + + OpBuilder parallelBuilder(sourceRegion.getContext()); + Block *parallelBlock = new Block(); + parallelBuilder.setInsertionPointToStart(parallelBlock); + + omp::SingleOperands singleOperands; + if (isLast) + singleOperands.nowait = rootBuilder.getUnitAttr(); + singleOperands.copyprivateVars = + moveToSingle(std::get(opOrSingle), allocaBuilder, + singleBuilder, parallelBuilder); + cleanupBlock(singleBlock); + for (auto var : singleOperands.copyprivateVars) { + mlir::func::FuncOp funcOp = + createCopyFunc(loc, var.getType(), firCopyFuncBuilder); + singleOperands.copyprivateSyms.push_back(SymbolRefAttr::get(funcOp)); + } + omp::SingleOp singleOp = + rootBuilder.create(loc, singleOperands); + singleOp.getRegion().push_back(singleBlock); + rootBuilder.getInsertionBlock()->getOperations().splice( + rootBuilder.getInsertionPoint(), parallelBlock->getOperations()); + targetRegion.front().getOperations().splice( + singleOp->getIterator(), allocaBlock->getOperations()); + delete allocaBlock; + delete parallelBlock; + } else { + auto op = std::get(opOrSingle); + if (auto wslw = dyn_cast(op)) { + omp::WsloopOperands wsloopOperands; + if (isLast) + wsloopOperands.nowait = rootBuilder.getUnitAttr(); + auto wsloop = + rootBuilder.create(loc, wsloopOperands); + auto clonedWslw = cast( + rootBuilder.clone(*wslw, rootMapping)); + wsloop.getRegion().takeBody(clonedWslw.getRegion()); + clonedWslw->erase(); + } else { + assert(mustParallelizeOp(op)); + Operation *cloned = rootBuilder.cloneWithoutRegions(*op, rootMapping); + for (auto [region, clonedRegion] : + llvm::zip(op->getRegions(), cloned->getRegions())) + parallelizeRegion(region, clonedRegion, rootMapping, loc, di); + } + } + } + + rootBuilder.clone(*block.getTerminator(), rootMapping); + }; + + if (sourceRegion.hasOneBlock()) { + handleOneBlock(sourceRegion.front()); + } else { + auto &domTree = di.getDomTree(&sourceRegion); + for (auto node : llvm::breadth_first(domTree.getRootNode())) { + handleOneBlock(*node->getBlock()); + } + } + + for (Block &targetBlock : targetRegion) + cleanupBlock(&targetBlock); +} + +/// Lowers workshare to a sequence of single-thread regions and parallel loops +/// +/// For example: +/// +/// omp.workshare { +/// %a = fir.allocmem +/// omp.workshare.loop_wrapper {} +/// fir.call Assign %b %a +/// fir.freemem %a +/// } +/// +/// becomes +/// +/// %tmp = fir.alloca +/// omp.single copyprivate(%tmp) { +/// %a = fir.allocmem +/// fir.store %a %tmp +/// } +/// %a_reloaded = fir.load %tmp +/// omp.workshare.loop_wrapper {} +/// omp.single { +/// fir.call Assign %b %a_reloaded +/// fir.freemem %a_reloaded +/// } +/// +/// Note that we allocate temporary memory for values in omp.single's which need +/// to be accessed by all threads and broadcast them using single's copyprivate +LogicalResult lowerWorkshare(mlir::omp::WorkshareOp wsOp, DominanceInfo &di) { + Location loc = wsOp->getLoc(); + IRMapping rootMapping; + + OpBuilder rootBuilder(wsOp); + + // This operation is just a placeholder which will be erased later. We need it + // because our `parallelizeRegion` function works on regions and not blocks. 
+ omp::WorkshareOp newOp = + rootBuilder.create(loc, omp::WorkshareOperands()); + if (!wsOp.getNowait()) + rootBuilder.create(loc); + + parallelizeRegion(wsOp.getRegion(), newOp.getRegion(), rootMapping, loc, di); + + if (wsOp.getRegion().getBlocks().size() != 1) + return failure(); + + // Inline the contents of the placeholder workshare op into its parent block. + Block *theBlock = &newOp.getRegion().front(); + Operation *term = theBlock->getTerminator(); + Block *parentBlock = wsOp->getBlock(); + parentBlock->getOperations().splice(newOp->getIterator(), + theBlock->getOperations()); + assert(term->getNumOperands() == 0); + term->erase(); + newOp->erase(); + wsOp->erase(); + return success(); +} + +class LowerWorksharePass + : public flangomp::impl::LowerWorkshareBase { +public: + void runOnOperation() override { + mlir::DominanceInfo &di = getAnalysis(); + getOperation()->walk([&](mlir::omp::WorkshareOp wsOp) { + if (failed(lowerWorkshare(wsOp, di))) + signalPassFailure(); + }); + } +}; +} // namespace diff --git a/flang/lib/Optimizer/Passes/Pipelines.cpp b/flang/lib/Optimizer/Passes/Pipelines.cpp index 3fa5c54403bd8..c1a5902b74788 100644 --- a/flang/lib/Optimizer/Passes/Pipelines.cpp +++ b/flang/lib/Optimizer/Passes/Pipelines.cpp @@ -212,7 +212,7 @@ void createDefaultFIROptimizerPassPipeline(mlir::PassManager &pm, /// \param pm - MLIR pass manager that will hold the pipeline definition /// \param optLevel - optimization level used for creating FIR optimization /// passes pipeline -void createHLFIRToFIRPassPipeline(mlir::PassManager &pm, +void createHLFIRToFIRPassPipeline(mlir::PassManager &pm, bool enableOpenMP, llvm::OptimizationLevel optLevel) { if (optLevel.isOptimizingForSpeed()) { addCanonicalizerPassWithoutRegionSimplification(pm); @@ -230,6 +230,8 @@ void createHLFIRToFIRPassPipeline(mlir::PassManager &pm, pm.addPass(hlfir::createLowerHLFIRIntrinsics()); pm.addPass(hlfir::createBufferizeHLFIR()); pm.addPass(hlfir::createConvertHLFIRtoFIR()); + if (enableOpenMP) + pm.addPass(flangomp::createLowerWorkshare()); } /// Create a pass pipeline for handling certain OpenMP transformations needed @@ -302,7 +304,7 @@ void createDefaultFIRCodeGenPassPipeline(mlir::PassManager &pm, void createMLIRToLLVMPassPipeline(mlir::PassManager &pm, MLIRToLLVMPassPipelineConfig &config, llvm::StringRef inputFilename) { - fir::createHLFIRToFIRPassPipeline(pm, config.OptLevel); + fir::createHLFIRToFIRPassPipeline(pm, config.EnableOpenMP, config.OptLevel); // Add default optimizer pass pipeline. 
fir::createDefaultFIROptimizerPassPipeline(pm, config); diff --git a/flang/test/Fir/basic-program.fir b/flang/test/Fir/basic-program.fir index bca454c13ff9c..4b18acb7c2b43 100644 --- a/flang/test/Fir/basic-program.fir +++ b/flang/test/Fir/basic-program.fir @@ -47,6 +47,7 @@ func.func @_QQmain() { // PASSES-NEXT: LowerHLFIRIntrinsics // PASSES-NEXT: BufferizeHLFIR // PASSES-NEXT: ConvertHLFIRtoFIR +// PASSES-NEXT: LowerWorkshare // PASSES-NEXT: CSE // PASSES-NEXT: (S) 0 num-cse'd - Number of operations CSE'd // PASSES-NEXT: (S) 0 num-dce'd - Number of operations DCE'd diff --git a/flang/test/Transforms/OpenMP/lower-workshare.mlir b/flang/test/Transforms/OpenMP/lower-workshare.mlir new file mode 100644 index 0000000000000..a609ee5d3d6c2 --- /dev/null +++ b/flang/test/Transforms/OpenMP/lower-workshare.mlir @@ -0,0 +1,189 @@ +// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s + +// checks: +// nowait on final omp.single +func.func @wsfunc(%arg0: !fir.ref>) { + omp.parallel { + omp.workshare { + %c42 = arith.constant 42 : index + %c1_i32 = arith.constant 1 : i32 + %0 = fir.shape %c42 : (index) -> !fir.shape<1> + %1:2 = hlfir.declare %arg0(%0) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) + %2 = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""} + %3:2 = hlfir.declare %2(%0) {uniq_name = ".tmp.array"} : (!fir.heap>, !fir.shape<1>) -> (!fir.heap>, !fir.heap>) + %true = arith.constant true + %c1 = arith.constant 1 : index + omp.workshare.loop_wrapper { + omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) { + %7 = hlfir.designate %1#0 (%arg1) : (!fir.ref>, index) -> !fir.ref + %8 = fir.load %7 : !fir.ref + %9 = arith.subi %8, %c1_i32 : i32 + %10 = hlfir.designate %3#0 (%arg1) : (!fir.heap>, index) -> !fir.ref + hlfir.assign %9 to %10 temporary_lhs : i32, !fir.ref + omp.yield + } + omp.terminator + } + %4 = fir.undefined tuple>, i1> + %5 = fir.insert_value %4, %true, [1 : index] : (tuple>, i1>, i1) -> tuple>, i1> + %6 = fir.insert_value %5, %3#0, [0 : index] : (tuple>, i1>, !fir.heap>) -> tuple>, i1> + hlfir.assign %3#0 to %1#0 : !fir.heap>, !fir.ref> + fir.freemem %3#0 : !fir.heap> + omp.terminator + } + omp.terminator + } + return +} + +// ----- + +// checks: +// fir.alloca hoisted out and copyprivate'd +func.func @wsfunc(%arg0: !fir.ref>) { + omp.workshare { + %c1_i32 = arith.constant 1 : i32 + %alloc = fir.alloca i32 + fir.store %c1_i32 to %alloc : !fir.ref + %c42 = arith.constant 42 : index + %0 = fir.shape %c42 : (index) -> !fir.shape<1> + %1:2 = hlfir.declare %arg0(%0) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) + %2 = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""} + %3:2 = hlfir.declare %2(%0) {uniq_name = ".tmp.array"} : (!fir.heap>, !fir.shape<1>) -> (!fir.heap>, !fir.heap>) + %true = arith.constant true + %c1 = arith.constant 1 : index + omp.workshare.loop_wrapper { + omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) { + %7 = hlfir.designate %1#0 (%arg1) : (!fir.ref>, index) -> !fir.ref + %8 = fir.load %7 : !fir.ref + %ld = fir.load %alloc : !fir.ref + %n8 = arith.subi %8, %ld : i32 + %9 = arith.subi %n8, %c1_i32 : i32 + %10 = hlfir.designate %3#0 (%arg1) : (!fir.heap>, index) -> !fir.ref + hlfir.assign %9 to %10 temporary_lhs : i32, !fir.ref + omp.yield + } + omp.terminator + } + %4 = fir.undefined tuple>, i1> + %5 = fir.insert_value %4, %true, [1 : index] : (tuple>, i1>, i1) -> tuple>, 
i1> + %6 = fir.insert_value %5, %3#0, [0 : index] : (tuple>, i1>, !fir.heap>) -> tuple>, i1> + "test.test1"(%alloc) : (!fir.ref) -> () + hlfir.assign %3#0 to %1#0 : !fir.heap>, !fir.ref> + fir.freemem %3#0 : !fir.heap> + omp.terminator + } + return +} + +// CHECK-LABEL: func.func private @_workshare_copy_heap_42xi32( +// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref>>, +// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref>>) { +// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref>> +// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref>> +// CHECK: return +// CHECK: } + +// CHECK-LABEL: func.func @wsfunc( +// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref>) { +// CHECK: omp.parallel { +// CHECK: %[[VAL_1:.*]] = fir.alloca !fir.heap> +// CHECK: omp.single copyprivate(%[[VAL_1]] -> @_workshare_copy_heap_42xi32 : !fir.ref>>) { +// CHECK: %[[VAL_2:.*]] = arith.constant 42 : index +// CHECK: %[[VAL_3:.*]] = fir.shape %[[VAL_2]] : (index) -> !fir.shape<1> +// CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_3]]) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) +// CHECK: %[[VAL_5:.*]] = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""} +// CHECK: fir.store %[[VAL_5]] to %[[VAL_1]] : !fir.ref>> +// CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]](%[[VAL_3]]) {uniq_name = ".tmp.array"} : (!fir.heap>, !fir.shape<1>) -> (!fir.heap>, !fir.heap>) +// CHECK: omp.terminator +// CHECK: } +// CHECK: %[[VAL_7:.*]] = arith.constant 42 : index +// CHECK: %[[VAL_8:.*]] = arith.constant 1 : i32 +// CHECK: %[[VAL_9:.*]] = fir.shape %[[VAL_7]] : (index) -> !fir.shape<1> +// CHECK: %[[VAL_10:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_9]]) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) +// CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_1]] : !fir.ref>> +// CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]](%[[VAL_9]]) {uniq_name = ".tmp.array"} : (!fir.heap>, !fir.shape<1>) -> (!fir.heap>, !fir.heap>) +// CHECK: %[[VAL_13:.*]] = arith.constant 1 : index +// CHECK: omp.wsloop { +// CHECK: omp.loop_nest (%[[VAL_14:.*]]) : index = (%[[VAL_13]]) to (%[[VAL_7]]) inclusive step (%[[VAL_13]]) { +// CHECK: %[[VAL_15:.*]] = hlfir.designate %[[VAL_10]]#0 (%[[VAL_14]]) : (!fir.ref>, index) -> !fir.ref +// CHECK: %[[VAL_16:.*]] = fir.load %[[VAL_15]] : !fir.ref +// CHECK: %[[VAL_17:.*]] = arith.subi %[[VAL_16]], %[[VAL_8]] : i32 +// CHECK: %[[VAL_18:.*]] = hlfir.designate %[[VAL_12]]#0 (%[[VAL_14]]) : (!fir.heap>, index) -> !fir.ref +// CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_18]] temporary_lhs : i32, !fir.ref +// CHECK: omp.yield +// CHECK: } +// CHECK: omp.terminator +// CHECK: } +// CHECK: omp.single nowait { +// CHECK: hlfir.assign %[[VAL_12]]#0 to %[[VAL_10]]#0 : !fir.heap>, !fir.ref> +// CHECK: fir.freemem %[[VAL_12]]#0 : !fir.heap> +// CHECK: omp.terminator +// CHECK: } +// CHECK: omp.barrier +// CHECK: omp.terminator +// CHECK: } +// CHECK: return +// CHECK: } + +// CHECK-LABEL: func.func private @_workshare_copy_heap_42xi32( +// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref>>, +// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref>>) { +// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref>> +// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref>> +// CHECK: return +// CHECK: } + +// CHECK-LABEL: func.func private @_workshare_copy_i32( +// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref, +// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref) { +// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref +// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref +// CHECK: return +// CHECK: } + +// CHECK-LABEL: 
func.func @wsfunc( +// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref>) { +// CHECK: %[[VAL_1:.*]] = fir.alloca i32 +// CHECK: %[[VAL_2:.*]] = fir.alloca !fir.heap> +// CHECK: omp.single copyprivate(%[[VAL_1]] -> @_workshare_copy_i32 : !fir.ref, %[[VAL_2]] -> @_workshare_copy_heap_42xi32 : !fir.ref>>) { +// CHECK: %[[VAL_3:.*]] = arith.constant 1 : i32 +// CHECK: fir.store %[[VAL_3]] to %[[VAL_1]] : !fir.ref +// CHECK: %[[VAL_4:.*]] = arith.constant 42 : index +// CHECK: %[[VAL_5:.*]] = fir.shape %[[VAL_4]] : (index) -> !fir.shape<1> +// CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_5]]) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) +// CHECK: %[[VAL_7:.*]] = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""} +// CHECK: fir.store %[[VAL_7]] to %[[VAL_2]] : !fir.ref>> +// CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]](%[[VAL_5]]) {uniq_name = ".tmp.array"} : (!fir.heap>, !fir.shape<1>) -> (!fir.heap>, !fir.heap>) +// CHECK: omp.terminator +// CHECK: } +// CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 +// CHECK: %[[VAL_10:.*]] = arith.constant 42 : index +// CHECK: %[[VAL_11:.*]] = fir.shape %[[VAL_10]] : (index) -> !fir.shape<1> +// CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_11]]) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) +// CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_2]] : !fir.ref>> +// CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_13]](%[[VAL_11]]) {uniq_name = ".tmp.array"} : (!fir.heap>, !fir.shape<1>) -> (!fir.heap>, !fir.heap>) +// CHECK: %[[VAL_15:.*]] = arith.constant 1 : index +// CHECK: omp.wsloop { +// CHECK: omp.loop_nest (%[[VAL_16:.*]]) : index = (%[[VAL_15]]) to (%[[VAL_10]]) inclusive step (%[[VAL_15]]) { +// CHECK: %[[VAL_17:.*]] = hlfir.designate %[[VAL_12]]#0 (%[[VAL_16]]) : (!fir.ref>, index) -> !fir.ref +// CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_17]] : !fir.ref +// CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_1]] : !fir.ref +// CHECK: %[[VAL_20:.*]] = arith.subi %[[VAL_18]], %[[VAL_19]] : i32 +// CHECK: %[[VAL_21:.*]] = arith.subi %[[VAL_20]], %[[VAL_9]] : i32 +// CHECK: %[[VAL_22:.*]] = hlfir.designate %[[VAL_14]]#0 (%[[VAL_16]]) : (!fir.heap>, index) -> !fir.ref +// CHECK: hlfir.assign %[[VAL_21]] to %[[VAL_22]] temporary_lhs : i32, !fir.ref +// CHECK: omp.yield +// CHECK: } +// CHECK: omp.terminator +// CHECK: } +// CHECK: omp.single nowait { +// CHECK: "test.test1"(%[[VAL_1]]) : (!fir.ref) -> () +// CHECK: hlfir.assign %[[VAL_14]]#0 to %[[VAL_12]]#0 : !fir.heap>, !fir.ref> +// CHECK: fir.freemem %[[VAL_14]]#0 : !fir.heap> +// CHECK: omp.terminator +// CHECK: } +// CHECK: omp.barrier +// CHECK: return +// CHECK: } + diff --git a/flang/test/Transforms/OpenMP/lower-workshare2.mlir b/flang/test/Transforms/OpenMP/lower-workshare2.mlir new file mode 100644 index 0000000000000..940662e0bdccc --- /dev/null +++ b/flang/test/Transforms/OpenMP/lower-workshare2.mlir @@ -0,0 +1,23 @@ +// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s + +// Check that we correctly handle nowait + +// CHECK-LABEL: func.func @nonowait +func.func @nonowait(%arg0: !fir.ref>) { + // CHECK: omp.barrier + omp.workshare { + omp.terminator + } + return +} + +// ----- + +// CHECK-LABEL: func.func @nowait +func.func @nowait(%arg0: !fir.ref>) { + // CHECK-NOT: omp.barrier + omp.workshare nowait { + omp.terminator + } + return +} diff --git a/flang/test/Transforms/OpenMP/lower-workshare3.mlir b/flang/test/Transforms/OpenMP/lower-workshare3.mlir new file mode 
100644 index 0000000000000..5a3d583527fdd --- /dev/null +++ b/flang/test/Transforms/OpenMP/lower-workshare3.mlir @@ -0,0 +1,74 @@ +// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s + + +// Check if we store the correct values + +func.func @wsfunc() { + omp.parallel { + // CHECK: fir.alloca + // CHECK: fir.alloca + // CHECK: fir.alloca + // CHECK: fir.alloca + // CHECK: fir.alloca + // CHECK-NOT: fir.alloca + omp.workshare { + + %t1 = "test.test1"() : () -> i32 + // CHECK: %[[T1:.*]] = "test.test1" + // CHECK: fir.store %[[T1]] + %t2 = "test.test2"() : () -> i32 + // CHECK: %[[T2:.*]] = "test.test2" + // CHECK: fir.store %[[T2]] + %t3 = "test.test3"() : () -> i32 + // CHECK: %[[T3:.*]] = "test.test3" + // CHECK-NOT: fir.store %[[T3]] + %t4 = "test.test4"() : () -> i32 + // CHECK: %[[T4:.*]] = "test.test4" + // CHECK: fir.store %[[T4]] + %t5 = "test.test5"() : () -> i32 + // CHECK: %[[T5:.*]] = "test.test5" + // CHECK: fir.store %[[T5]] + %t6 = "test.test6"() : () -> i32 + // CHECK: %[[T6:.*]] = "test.test6" + // CHECK-NOT: fir.store %[[T6]] + + + "test.test1"(%t1) : (i32) -> () + "test.test1"(%t2) : (i32) -> () + "test.test1"(%t3) : (i32) -> () + + %true = arith.constant true + fir.if %true { + "test.test2"(%t3) : (i32) -> () + } + + %c1_i32 = arith.constant 1 : i32 + + %t5_pure_use = arith.addi %t5, %c1_i32 : i32 + + %t6_mem_effect_use = "test.test8"(%t6) : (i32) -> i32 + // CHECK: %[[T6_USE:.*]] = "test.test8" + // CHECK: fir.store %[[T6_USE]] + + %c42 = arith.constant 42 : index + %c1 = arith.constant 1 : index + omp.workshare.loop_wrapper { + omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) { + "test.test10"(%t1) : (i32) -> () + "test.test10"(%t5_pure_use) : (i32) -> () + "test.test10"(%t6_mem_effect_use) : (i32) -> () + omp.yield + } + omp.terminator + } + + "test.test10"(%t2) : (i32) -> () + fir.if %true { + "test.test10"(%t4) : (i32) -> () + } + omp.terminator + } + omp.terminator + } + return +} diff --git a/flang/test/Transforms/OpenMP/lower-workshare4.mlir b/flang/test/Transforms/OpenMP/lower-workshare4.mlir new file mode 100644 index 0000000000000..02fe90097008d --- /dev/null +++ b/flang/test/Transforms/OpenMP/lower-workshare4.mlir @@ -0,0 +1,59 @@ +// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s + +// Check that we cleanup unused pure operations from the parallel and single +// regions + +// CHECK-LABEL: func.func @wsfunc() { +// CHECK: %[[VAL_0:.*]] = fir.alloca i32 +// CHECK: omp.parallel { +// CHECK: omp.single { +// CHECK: %[[VAL_1:.*]] = "test.test1"() : () -> i32 +// CHECK: %[[VAL_2:.*]] = arith.constant 2 : index +// CHECK: %[[VAL_3:.*]] = arith.constant 3 : index +// CHECK: %[[VAL_4:.*]] = arith.addi %[[VAL_2]], %[[VAL_3]] : index +// CHECK: "test.test3"(%[[VAL_4]]) : (index) -> () +// CHECK: omp.terminator +// CHECK: } +// CHECK: %[[VAL_5:.*]] = arith.constant 1 : index +// CHECK: %[[VAL_6:.*]] = arith.constant 42 : index +// CHECK: omp.wsloop nowait { +// CHECK: omp.loop_nest (%[[VAL_7:.*]]) : index = (%[[VAL_5]]) to (%[[VAL_6]]) inclusive step (%[[VAL_5]]) { +// CHECK: "test.test2"() : () -> () +// CHECK: omp.yield +// CHECK: } +// CHECK: omp.terminator +// CHECK: } +// CHECK: omp.barrier +// CHECK: omp.terminator +// CHECK: } +// CHECK: return +// CHECK: } +func.func @wsfunc() { + %a = fir.alloca i32 + omp.parallel { + omp.workshare { + %t1 = "test.test1"() : () -> i32 + + %c1 = arith.constant 1 : index + %c42 = arith.constant 42 : index + + %c2 = 
arith.constant 2 : index + %c3 = arith.constant 3 : index + %add = arith.addi %c2, %c3 : index + "test.test3"(%add) : (index) -> () + + omp.workshare.loop_wrapper { + omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) { + "test.test2"() : () -> () + omp.yield + } + omp.terminator + } + omp.terminator + } + omp.terminator + } + return +} + + diff --git a/flang/test/Transforms/OpenMP/lower-workshare5.mlir b/flang/test/Transforms/OpenMP/lower-workshare5.mlir new file mode 100644 index 0000000000000..177f8aa8f86c7 --- /dev/null +++ b/flang/test/Transforms/OpenMP/lower-workshare5.mlir @@ -0,0 +1,42 @@ +// XFAIL: * +// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s + +// TODO we can lower these but we have no guarantee that the parent of +// omp.workshare supports multi-block regions, thus we fail for now. + +func.func @wsfunc() { + %a = fir.alloca i32 + omp.parallel { + omp.workshare { + ^bb1: + %c1 = arith.constant 1 : i32 + cf.br ^bb3(%c1: i32) + ^bb3(%arg1: i32): + "test.test2"(%arg1) : (i32) -> () + omp.terminator + } + omp.terminator + } + return +} + +// ----- + +func.func @wsfunc() { + %a = fir.alloca i32 + omp.parallel { + omp.workshare { + ^bb1: + %c1 = arith.constant 1 : i32 + cf.br ^bb3(%c1: i32) + ^bb2: + "test.test2"(%r) : (i32) -> () + omp.terminator + ^bb3(%arg1: i32): + %r = "test.test2"(%arg1) : (i32) -> i32 + cf.br ^bb2 + } + omp.terminator + } + return +} diff --git a/flang/test/Transforms/OpenMP/lower-workshare6.mlir b/flang/test/Transforms/OpenMP/lower-workshare6.mlir new file mode 100644 index 0000000000000..48379470e9256 --- /dev/null +++ b/flang/test/Transforms/OpenMP/lower-workshare6.mlir @@ -0,0 +1,51 @@ +// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s + +// Checks that the omp.workshare.loop_wrapper binds to the correct omp.workshare + +func.func @wsfunc() { + %c1 = arith.constant 1 : index + %c42 = arith.constant 42 : index + omp.parallel { + omp.workshare nowait { + omp.parallel { + omp.workshare nowait { + omp.workshare.loop_wrapper { + omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) { + "test.test2"() : () -> () + omp.yield + } + omp.terminator + } + omp.terminator + } + omp.terminator + } + omp.terminator + } + omp.terminator + } + return +} + +// CHECK-LABEL: func.func @wsfunc() { +// CHECK: %[[VAL_0:.*]] = arith.constant 1 : index +// CHECK: %[[VAL_1:.*]] = arith.constant 42 : index +// CHECK: omp.parallel { +// CHECK: omp.single nowait { +// CHECK: omp.parallel { +// CHECK: omp.wsloop nowait { +// CHECK: omp.loop_nest (%[[VAL_2:.*]]) : index = (%[[VAL_0]]) to (%[[VAL_1]]) inclusive step (%[[VAL_0]]) { +// CHECK: "test.test2"() : () -> () +// CHECK: omp.yield +// CHECK: } +// CHECK: omp.terminator +// CHECK: } +// CHECK: omp.terminator +// CHECK: } +// CHECK: omp.terminator +// CHECK: } +// CHECK: omp.terminator +// CHECK: } +// CHECK: return +// CHECK: } + diff --git a/flang/tools/bbc/bbc.cpp b/flang/tools/bbc/bbc.cpp index fe5e36f704c76..1c24979bbcdaf 100644 --- a/flang/tools/bbc/bbc.cpp +++ b/flang/tools/bbc/bbc.cpp @@ -452,7 +452,8 @@ static llvm::LogicalResult convertFortranSourceToMLIR( if (emitFIR && useHLFIR) { // lower HLFIR to FIR - fir::createHLFIRToFIRPassPipeline(pm, llvm::OptimizationLevel::O2); + fir::createHLFIRToFIRPassPipeline(pm, enableOpenMP, + llvm::OptimizationLevel::O2); if (mlir::failed(pm.run(mlirModule))) { llvm::errs() << "FATAL: lowering from HLFIR to FIR failed"; return mlir::failure(); @@ 
-467,6 +468,8 @@ static llvm::LogicalResult convertFortranSourceToMLIR( // Add O2 optimizer pass pipeline. MLIRToLLVMPassPipelineConfig config(llvm::OptimizationLevel::O2); + if (enableOpenMP) + config.EnableOpenMP = true; config.NSWOnLoopVarInc = setNSW; fir::registerDefaultInlinerPass(config); fir::createDefaultFIROptimizerPassPipeline(pm, config); diff --git a/flang/tools/tco/tco.cpp b/flang/tools/tco/tco.cpp index 5c373c4e85258..eaf4bae088454 100644 --- a/flang/tools/tco/tco.cpp +++ b/flang/tools/tco/tco.cpp @@ -139,6 +139,7 @@ compileFIR(const mlir::PassPipelineCLParser &passPipeline) { return mlir::failure(); } else { MLIRToLLVMPassPipelineConfig config(llvm::OptimizationLevel::O2); + config.EnableOpenMP = true; // assume the input contains OpenMP config.AliasAnalysis = true; // enabled when optimizing for speed if (codeGenLLVM) { // Run only CodeGen passes. From 1152749adc6f254d2bed42ee05d1e6d10d2df653 Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Mon, 23 Sep 2024 15:07:48 +0900 Subject: [PATCH 10/34] Emit a proper error message for CFG in workshare --- flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 13 +++++- .../OpenMP/lower-workshare-todo-cfg-dom.mlir | 23 ++++++++++ .../OpenMP/lower-workshare-todo-cfg.mlir | 20 +++++++++ .../Transforms/OpenMP/lower-workshare5.mlir | 42 ------------------- 4 files changed, 55 insertions(+), 43 deletions(-) create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir delete mode 100644 flang/test/Transforms/OpenMP/lower-workshare5.mlir diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp index 6e5538b54ba5e..cf1867311cc23 100644 --- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp +++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp @@ -16,6 +16,7 @@ // //===----------------------------------------------------------------------===// +#include "flang/Optimizer/Builder/Todo.h" #include #include #include @@ -416,8 +417,18 @@ LogicalResult lowerWorkshare(mlir::omp::WorkshareOp wsOp, DominanceInfo &di) { parallelizeRegion(wsOp.getRegion(), newOp.getRegion(), rootMapping, loc, di); + // FIXME Currently, we only support workshare constructs with structured + // control flow. The transformation itself supports CFG, however, once we + // transform the MLIR region in the omp.workshare, we need to inline that + // region in the parent block. We have no guarantees at this point of the + // pipeline that the parent op supports CFG (e.g. fir.if), thus this is not + // generally possible. The alternative is to put the lowered region in an + // operation akin to scf.execute_region, which will get lowered at the same + // time when fir ops get lowered to CFG. However, SCF is not registered in + // flang so we cannot use it. Remove this requirement once we have + // scf.execute_region or an alternative operation available. if (wsOp.getRegion().getBlocks().size() != 1) - return failure(); + TODO(wsOp->getLoc(), "omp workshare with unstructured control flow"); // Inline the contents of the placeholder workshare op into its parent block. 
Block *theBlock = &newOp.getRegion().front(); diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir new file mode 100644 index 0000000000000..1c47d448f597d --- /dev/null +++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir @@ -0,0 +1,23 @@ +// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s + +// CHECK: not yet implemented: omp workshare with unstructured control flow + +// Check that the definition of %r dominates its use post-transform +func.func @wsfunc() { + %a = fir.alloca i32 + omp.parallel { + omp.workshare { + ^bb1: + %c1 = arith.constant 1 : i32 + cf.br ^bb3(%c1: i32) + ^bb2: + "test.test2"(%r) : (i32) -> () + omp.terminator + ^bb3(%arg1: i32): + %r = "test.test2"(%arg1) : (i32) -> i32 + cf.br ^bb2 + } + omp.terminator + } + return +} diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir new file mode 100644 index 0000000000000..bf6c196a05b4a --- /dev/null +++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir @@ -0,0 +1,20 @@ +// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s + +// CHECK: not yet implemented: omp workshare with unstructured control flow + +// Check transforming a simple CFG +func.func @wsfunc() { + %a = fir.alloca i32 + omp.parallel { + omp.workshare { + ^bb1: + %c1 = arith.constant 1 : i32 + cf.br ^bb3(%c1: i32) + ^bb3(%arg1: i32): + "test.test2"(%arg1) : (i32) -> () + omp.terminator + } + omp.terminator + } + return +} diff --git a/flang/test/Transforms/OpenMP/lower-workshare5.mlir b/flang/test/Transforms/OpenMP/lower-workshare5.mlir deleted file mode 100644 index 177f8aa8f86c7..0000000000000 --- a/flang/test/Transforms/OpenMP/lower-workshare5.mlir +++ /dev/null @@ -1,42 +0,0 @@ -// XFAIL: * -// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s - -// TODO we can lower these but we have no guarantee that the parent of -// omp.workshare supports multi-block regions, thus we fail for now. 
- -func.func @wsfunc() { - %a = fir.alloca i32 - omp.parallel { - omp.workshare { - ^bb1: - %c1 = arith.constant 1 : i32 - cf.br ^bb3(%c1: i32) - ^bb3(%arg1: i32): - "test.test2"(%arg1) : (i32) -> () - omp.terminator - } - omp.terminator - } - return -} - -// ----- - -func.func @wsfunc() { - %a = fir.alloca i32 - omp.parallel { - omp.workshare { - ^bb1: - %c1 = arith.constant 1 : i32 - cf.br ^bb3(%c1: i32) - ^bb2: - "test.test2"(%r) : (i32) -> () - omp.terminator - ^bb3(%arg1: i32): - %r = "test.test2"(%arg1) : (i32) -> i32 - cf.br ^bb2 - } - omp.terminator - } - return -} From e3130f1e82bc1dfe72b9e191553df59cfe86ca52 Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Mon, 23 Sep 2024 15:44:23 +0900 Subject: [PATCH 11/34] Cleanup tests --- .../OpenMP/lower-workshare-alloca.mlir | 55 +++++ ...are6.mlir => lower-workshare-binding.mlir} | 0 ...are4.mlir => lower-workshare-cleanup.mlir} | 0 ....mlir => lower-workshare-copyprivate.mlir} | 0 ...hare2.mlir => lower-workshare-nowait.mlir} | 0 .../Transforms/OpenMP/lower-workshare.mlir | 189 ------------------ 6 files changed, 55 insertions(+), 189 deletions(-) create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir rename flang/test/Transforms/OpenMP/{lower-workshare6.mlir => lower-workshare-binding.mlir} (100%) rename flang/test/Transforms/OpenMP/{lower-workshare4.mlir => lower-workshare-cleanup.mlir} (100%) rename flang/test/Transforms/OpenMP/{lower-workshare3.mlir => lower-workshare-copyprivate.mlir} (100%) rename flang/test/Transforms/OpenMP/{lower-workshare2.mlir => lower-workshare-nowait.mlir} (100%) delete mode 100644 flang/test/Transforms/OpenMP/lower-workshare.mlir diff --git a/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir b/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir new file mode 100644 index 0000000000000..d1bef3a359e48 --- /dev/null +++ b/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir @@ -0,0 +1,55 @@ +// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s | FileCheck %s + +// Checks that fir.alloca is hoisted out and copyprivate'd +func.func @wsfunc() { + omp.workshare { + %c1 = arith.constant 1 : index + %c42 = arith.constant 42 : index + %c1_i32 = arith.constant 1 : i32 + %alloc = fir.alloca i32 + fir.store %c1_i32 to %alloc : !fir.ref + omp.workshare.loop_wrapper { + omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) { + "test.test1"(%alloc) : (!fir.ref) -> () + omp.yield + } + omp.terminator + } + "test.test2"(%alloc) : (!fir.ref) -> () + omp.terminator + } + return +} + +// CHECK-LABEL: func.func private @_workshare_copy_i32( +// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref, +// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref) { +// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref +// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref +// CHECK: return +// CHECK: } + +// CHECK-LABEL: func.func @wsfunc() { +// CHECK: %[[VAL_0:.*]] = fir.alloca i32 +// CHECK: omp.single copyprivate(%[[VAL_0]] -> @_workshare_copy_i32 : !fir.ref) { +// CHECK: %[[VAL_1:.*]] = arith.constant 1 : i32 +// CHECK: fir.store %[[VAL_1]] to %[[VAL_0]] : !fir.ref +// CHECK: omp.terminator +// CHECK: } +// CHECK: %[[VAL_2:.*]] = arith.constant 1 : index +// CHECK: %[[VAL_3:.*]] = arith.constant 42 : index +// CHECK: omp.wsloop { +// CHECK: omp.loop_nest (%[[VAL_4:.*]]) : index = (%[[VAL_2]]) to (%[[VAL_3]]) inclusive step (%[[VAL_2]]) { +// CHECK: "test.test1"(%[[VAL_0]]) : (!fir.ref) -> () +// CHECK: omp.yield +// CHECK: } +// CHECK: omp.terminator +// CHECK: } +// CHECK: omp.single 
nowait { +// CHECK: "test.test2"(%[[VAL_0]]) : (!fir.ref) -> () +// CHECK: omp.terminator +// CHECK: } +// CHECK: omp.barrier +// CHECK: return +// CHECK: } + diff --git a/flang/test/Transforms/OpenMP/lower-workshare6.mlir b/flang/test/Transforms/OpenMP/lower-workshare-binding.mlir similarity index 100% rename from flang/test/Transforms/OpenMP/lower-workshare6.mlir rename to flang/test/Transforms/OpenMP/lower-workshare-binding.mlir diff --git a/flang/test/Transforms/OpenMP/lower-workshare4.mlir b/flang/test/Transforms/OpenMP/lower-workshare-cleanup.mlir similarity index 100% rename from flang/test/Transforms/OpenMP/lower-workshare4.mlir rename to flang/test/Transforms/OpenMP/lower-workshare-cleanup.mlir diff --git a/flang/test/Transforms/OpenMP/lower-workshare3.mlir b/flang/test/Transforms/OpenMP/lower-workshare-copyprivate.mlir similarity index 100% rename from flang/test/Transforms/OpenMP/lower-workshare3.mlir rename to flang/test/Transforms/OpenMP/lower-workshare-copyprivate.mlir diff --git a/flang/test/Transforms/OpenMP/lower-workshare2.mlir b/flang/test/Transforms/OpenMP/lower-workshare-nowait.mlir similarity index 100% rename from flang/test/Transforms/OpenMP/lower-workshare2.mlir rename to flang/test/Transforms/OpenMP/lower-workshare-nowait.mlir diff --git a/flang/test/Transforms/OpenMP/lower-workshare.mlir b/flang/test/Transforms/OpenMP/lower-workshare.mlir deleted file mode 100644 index a609ee5d3d6c2..0000000000000 --- a/flang/test/Transforms/OpenMP/lower-workshare.mlir +++ /dev/null @@ -1,189 +0,0 @@ -// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s - -// checks: -// nowait on final omp.single -func.func @wsfunc(%arg0: !fir.ref>) { - omp.parallel { - omp.workshare { - %c42 = arith.constant 42 : index - %c1_i32 = arith.constant 1 : i32 - %0 = fir.shape %c42 : (index) -> !fir.shape<1> - %1:2 = hlfir.declare %arg0(%0) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) - %2 = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""} - %3:2 = hlfir.declare %2(%0) {uniq_name = ".tmp.array"} : (!fir.heap>, !fir.shape<1>) -> (!fir.heap>, !fir.heap>) - %true = arith.constant true - %c1 = arith.constant 1 : index - omp.workshare.loop_wrapper { - omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) { - %7 = hlfir.designate %1#0 (%arg1) : (!fir.ref>, index) -> !fir.ref - %8 = fir.load %7 : !fir.ref - %9 = arith.subi %8, %c1_i32 : i32 - %10 = hlfir.designate %3#0 (%arg1) : (!fir.heap>, index) -> !fir.ref - hlfir.assign %9 to %10 temporary_lhs : i32, !fir.ref - omp.yield - } - omp.terminator - } - %4 = fir.undefined tuple>, i1> - %5 = fir.insert_value %4, %true, [1 : index] : (tuple>, i1>, i1) -> tuple>, i1> - %6 = fir.insert_value %5, %3#0, [0 : index] : (tuple>, i1>, !fir.heap>) -> tuple>, i1> - hlfir.assign %3#0 to %1#0 : !fir.heap>, !fir.ref> - fir.freemem %3#0 : !fir.heap> - omp.terminator - } - omp.terminator - } - return -} - -// ----- - -// checks: -// fir.alloca hoisted out and copyprivate'd -func.func @wsfunc(%arg0: !fir.ref>) { - omp.workshare { - %c1_i32 = arith.constant 1 : i32 - %alloc = fir.alloca i32 - fir.store %c1_i32 to %alloc : !fir.ref - %c42 = arith.constant 42 : index - %0 = fir.shape %c42 : (index) -> !fir.shape<1> - %1:2 = hlfir.declare %arg0(%0) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) - %2 = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""} - %3:2 = hlfir.declare %2(%0) {uniq_name = ".tmp.array"} 
: (!fir.heap>, !fir.shape<1>) -> (!fir.heap>, !fir.heap>) - %true = arith.constant true - %c1 = arith.constant 1 : index - omp.workshare.loop_wrapper { - omp.loop_nest (%arg1) : index = (%c1) to (%c42) inclusive step (%c1) { - %7 = hlfir.designate %1#0 (%arg1) : (!fir.ref>, index) -> !fir.ref - %8 = fir.load %7 : !fir.ref - %ld = fir.load %alloc : !fir.ref - %n8 = arith.subi %8, %ld : i32 - %9 = arith.subi %n8, %c1_i32 : i32 - %10 = hlfir.designate %3#0 (%arg1) : (!fir.heap>, index) -> !fir.ref - hlfir.assign %9 to %10 temporary_lhs : i32, !fir.ref - omp.yield - } - omp.terminator - } - %4 = fir.undefined tuple>, i1> - %5 = fir.insert_value %4, %true, [1 : index] : (tuple>, i1>, i1) -> tuple>, i1> - %6 = fir.insert_value %5, %3#0, [0 : index] : (tuple>, i1>, !fir.heap>) -> tuple>, i1> - "test.test1"(%alloc) : (!fir.ref) -> () - hlfir.assign %3#0 to %1#0 : !fir.heap>, !fir.ref> - fir.freemem %3#0 : !fir.heap> - omp.terminator - } - return -} - -// CHECK-LABEL: func.func private @_workshare_copy_heap_42xi32( -// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref>>, -// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref>>) { -// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref>> -// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref>> -// CHECK: return -// CHECK: } - -// CHECK-LABEL: func.func @wsfunc( -// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref>) { -// CHECK: omp.parallel { -// CHECK: %[[VAL_1:.*]] = fir.alloca !fir.heap> -// CHECK: omp.single copyprivate(%[[VAL_1]] -> @_workshare_copy_heap_42xi32 : !fir.ref>>) { -// CHECK: %[[VAL_2:.*]] = arith.constant 42 : index -// CHECK: %[[VAL_3:.*]] = fir.shape %[[VAL_2]] : (index) -> !fir.shape<1> -// CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_3]]) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -// CHECK: %[[VAL_5:.*]] = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""} -// CHECK: fir.store %[[VAL_5]] to %[[VAL_1]] : !fir.ref>> -// CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]](%[[VAL_3]]) {uniq_name = ".tmp.array"} : (!fir.heap>, !fir.shape<1>) -> (!fir.heap>, !fir.heap>) -// CHECK: omp.terminator -// CHECK: } -// CHECK: %[[VAL_7:.*]] = arith.constant 42 : index -// CHECK: %[[VAL_8:.*]] = arith.constant 1 : i32 -// CHECK: %[[VAL_9:.*]] = fir.shape %[[VAL_7]] : (index) -> !fir.shape<1> -// CHECK: %[[VAL_10:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_9]]) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -// CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_1]] : !fir.ref>> -// CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]](%[[VAL_9]]) {uniq_name = ".tmp.array"} : (!fir.heap>, !fir.shape<1>) -> (!fir.heap>, !fir.heap>) -// CHECK: %[[VAL_13:.*]] = arith.constant 1 : index -// CHECK: omp.wsloop { -// CHECK: omp.loop_nest (%[[VAL_14:.*]]) : index = (%[[VAL_13]]) to (%[[VAL_7]]) inclusive step (%[[VAL_13]]) { -// CHECK: %[[VAL_15:.*]] = hlfir.designate %[[VAL_10]]#0 (%[[VAL_14]]) : (!fir.ref>, index) -> !fir.ref -// CHECK: %[[VAL_16:.*]] = fir.load %[[VAL_15]] : !fir.ref -// CHECK: %[[VAL_17:.*]] = arith.subi %[[VAL_16]], %[[VAL_8]] : i32 -// CHECK: %[[VAL_18:.*]] = hlfir.designate %[[VAL_12]]#0 (%[[VAL_14]]) : (!fir.heap>, index) -> !fir.ref -// CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_18]] temporary_lhs : i32, !fir.ref -// CHECK: omp.yield -// CHECK: } -// CHECK: omp.terminator -// CHECK: } -// CHECK: omp.single nowait { -// CHECK: hlfir.assign %[[VAL_12]]#0 to %[[VAL_10]]#0 : !fir.heap>, !fir.ref> -// CHECK: fir.freemem %[[VAL_12]]#0 : !fir.heap> -// CHECK: omp.terminator -// CHECK: } 
-// CHECK: omp.barrier -// CHECK: omp.terminator -// CHECK: } -// CHECK: return -// CHECK: } - -// CHECK-LABEL: func.func private @_workshare_copy_heap_42xi32( -// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref>>, -// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref>>) { -// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref>> -// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref>> -// CHECK: return -// CHECK: } - -// CHECK-LABEL: func.func private @_workshare_copy_i32( -// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref, -// CHECK-SAME: %[[VAL_1:.*]]: !fir.ref) { -// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref -// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref -// CHECK: return -// CHECK: } - -// CHECK-LABEL: func.func @wsfunc( -// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref>) { -// CHECK: %[[VAL_1:.*]] = fir.alloca i32 -// CHECK: %[[VAL_2:.*]] = fir.alloca !fir.heap> -// CHECK: omp.single copyprivate(%[[VAL_1]] -> @_workshare_copy_i32 : !fir.ref, %[[VAL_2]] -> @_workshare_copy_heap_42xi32 : !fir.ref>>) { -// CHECK: %[[VAL_3:.*]] = arith.constant 1 : i32 -// CHECK: fir.store %[[VAL_3]] to %[[VAL_1]] : !fir.ref -// CHECK: %[[VAL_4:.*]] = arith.constant 42 : index -// CHECK: %[[VAL_5:.*]] = fir.shape %[[VAL_4]] : (index) -> !fir.shape<1> -// CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_5]]) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -// CHECK: %[[VAL_7:.*]] = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""} -// CHECK: fir.store %[[VAL_7]] to %[[VAL_2]] : !fir.ref>> -// CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]](%[[VAL_5]]) {uniq_name = ".tmp.array"} : (!fir.heap>, !fir.shape<1>) -> (!fir.heap>, !fir.heap>) -// CHECK: omp.terminator -// CHECK: } -// CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32 -// CHECK: %[[VAL_10:.*]] = arith.constant 42 : index -// CHECK: %[[VAL_11:.*]] = fir.shape %[[VAL_10]] : (index) -> !fir.shape<1> -// CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_11]]) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) -// CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_2]] : !fir.ref>> -// CHECK: %[[VAL_14:.*]]:2 = hlfir.declare %[[VAL_13]](%[[VAL_11]]) {uniq_name = ".tmp.array"} : (!fir.heap>, !fir.shape<1>) -> (!fir.heap>, !fir.heap>) -// CHECK: %[[VAL_15:.*]] = arith.constant 1 : index -// CHECK: omp.wsloop { -// CHECK: omp.loop_nest (%[[VAL_16:.*]]) : index = (%[[VAL_15]]) to (%[[VAL_10]]) inclusive step (%[[VAL_15]]) { -// CHECK: %[[VAL_17:.*]] = hlfir.designate %[[VAL_12]]#0 (%[[VAL_16]]) : (!fir.ref>, index) -> !fir.ref -// CHECK: %[[VAL_18:.*]] = fir.load %[[VAL_17]] : !fir.ref -// CHECK: %[[VAL_19:.*]] = fir.load %[[VAL_1]] : !fir.ref -// CHECK: %[[VAL_20:.*]] = arith.subi %[[VAL_18]], %[[VAL_19]] : i32 -// CHECK: %[[VAL_21:.*]] = arith.subi %[[VAL_20]], %[[VAL_9]] : i32 -// CHECK: %[[VAL_22:.*]] = hlfir.designate %[[VAL_14]]#0 (%[[VAL_16]]) : (!fir.heap>, index) -> !fir.ref -// CHECK: hlfir.assign %[[VAL_21]] to %[[VAL_22]] temporary_lhs : i32, !fir.ref -// CHECK: omp.yield -// CHECK: } -// CHECK: omp.terminator -// CHECK: } -// CHECK: omp.single nowait { -// CHECK: "test.test1"(%[[VAL_1]]) : (!fir.ref) -> () -// CHECK: hlfir.assign %[[VAL_14]]#0 to %[[VAL_12]]#0 : !fir.heap>, !fir.ref> -// CHECK: fir.freemem %[[VAL_14]]#0 : !fir.heap> -// CHECK: omp.terminator -// CHECK: } -// CHECK: omp.barrier -// CHECK: return -// CHECK: } - From 75b213f57e4012887a7f5036573636ddea88a83f Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Mon, 23 Sep 2024 16:25:55 +0900 Subject: [PATCH 12/34] 
Fix todo tests --- flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir | 2 +- flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir index 1c47d448f597d..d10996167ae62 100644 --- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir +++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir @@ -1,4 +1,4 @@ -// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s +// RUN: %not_todo_cmd fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s // CHECK: not yet implemented: omp workshare with unstructured control flow diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir index bf6c196a05b4a..46d2a8e8d48a8 100644 --- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir +++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir @@ -1,4 +1,4 @@ -// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s +// RUN: %not_todo_cmd fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s // CHECK: not yet implemented: omp workshare with unstructured control flow From b227891307941a7376858b1d3a699108bead1fb2 Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Fri, 4 Oct 2024 14:21:14 +0900 Subject: [PATCH 13/34] Fix dst src in copy function --- flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp index cf1867311cc23..baf8346e7608a 100644 --- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp +++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp @@ -162,8 +162,8 @@ static mlir::func::FuncOp createCopyFunc(mlir::Location loc, mlir::Type varType, {loc, loc}); builder.setInsertionPointToStart(&funcOp.getRegion().back()); - Value loaded = builder.create(loc, funcOp.getArgument(0)); - builder.create(loc, loaded, funcOp.getArgument(1)); + Value loaded = builder.create(loc, funcOp.getArgument(1)); + builder.create(loc, loaded, funcOp.getArgument(0)); builder.create(loc); return funcOp; From 676bf68aa1ca532300070874622e49da1cbbc25a Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Fri, 4 Oct 2024 14:38:48 +0900 Subject: [PATCH 14/34] Use omp.single to handle CFG cases --- flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 77 +++++++++++++------ 1 file changed, 53 insertions(+), 24 deletions(-) diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp index baf8346e7608a..34399abbcd20e 100644 --- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp +++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp @@ -16,7 +16,6 @@ // //===----------------------------------------------------------------------===// -#include "flang/Optimizer/Builder/Todo.h" #include #include #include @@ -39,7 +38,6 @@ #include #include #include -#include #include @@ -96,6 +94,12 @@ bool shouldUseWorkshareLowering(Operation *op) { if (isNestedIn(parentWorkshare, op)) return false; + if (parentWorkshare.getRegion().getBlocks().size() != 1) { + parentWorkshare->emitWarning( + "omp workshare with unstructured control flow currently unsupported."); + return false; + } + return true; } @@ -408,15 +412,6 @@ LogicalResult 
lowerWorkshare(mlir::omp::WorkshareOp wsOp, DominanceInfo &di) { OpBuilder rootBuilder(wsOp); - // This operation is just a placeholder which will be erased later. We need it - // because our `parallelizeRegion` function works on regions and not blocks. - omp::WorkshareOp newOp = - rootBuilder.create(loc, omp::WorkshareOperands()); - if (!wsOp.getNowait()) - rootBuilder.create(loc); - - parallelizeRegion(wsOp.getRegion(), newOp.getRegion(), rootMapping, loc, di); - // FIXME Currently, we only support workshare constructs with structured // control flow. The transformation itself supports CFG, however, once we // transform the MLIR region in the omp.workshare, we need to inline that @@ -427,19 +422,53 @@ LogicalResult lowerWorkshare(mlir::omp::WorkshareOp wsOp, DominanceInfo &di) { // time when fir ops get lowered to CFG. However, SCF is not registered in // flang so we cannot use it. Remove this requirement once we have // scf.execute_region or an alternative operation available. - if (wsOp.getRegion().getBlocks().size() != 1) - TODO(wsOp->getLoc(), "omp workshare with unstructured control flow"); - - // Inline the contents of the placeholder workshare op into its parent block. - Block *theBlock = &newOp.getRegion().front(); - Operation *term = theBlock->getTerminator(); - Block *parentBlock = wsOp->getBlock(); - parentBlock->getOperations().splice(newOp->getIterator(), - theBlock->getOperations()); - assert(term->getNumOperands() == 0); - term->erase(); - newOp->erase(); - wsOp->erase(); + if (wsOp.getRegion().getBlocks().size() == 1) { + // This operation is just a placeholder which will be erased later. We need + // it because our `parallelizeRegion` function works on regions and not + // blocks. + omp::WorkshareOp newOp = + rootBuilder.create(loc, omp::WorkshareOperands()); + if (!wsOp.getNowait()) + rootBuilder.create(loc); + + parallelizeRegion(wsOp.getRegion(), newOp.getRegion(), rootMapping, loc, + di); + + // Inline the contents of the placeholder workshare op into its parent + // block. + Block *theBlock = &newOp.getRegion().front(); + Operation *term = theBlock->getTerminator(); + Block *parentBlock = wsOp->getBlock(); + parentBlock->getOperations().splice(newOp->getIterator(), + theBlock->getOperations()); + assert(term->getNumOperands() == 0); + term->erase(); + newOp->erase(); + wsOp->erase(); + } else { + // Otherwise just change the operation to an omp.single. + + // `shouldUseWorkshareLowering` should have guaranteed that there are no + // omp.workshare_loop_wrapper's that bind to this omp.workshare. + assert(!wsOp->walk([&](Operation *op) { + // Nested omp.workshare can have their own + // omp.workshare_loop_wrapper's. 
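+          // For illustration (nesting shape borrowed from the binding test in
+          // this series): a loop wrapper under an inner workshare binds to
+          // that inner construct, not to the omp.workshare being serialized
+          // here, which is why such nested subtrees are skipped below:
+          //
+          //   omp.workshare {                  // outer, serialized here
+          //     omp.parallel {
+          //       omp.workshare {              // inner, lowered on its own
+          //         omp.workshare.loop_wrapper { ... }
+          //       }
+          //     }
+          //   }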
+ if (isa(op)) + return WalkResult::skip(); + if (isa(op)) + return WalkResult::interrupt(); + return WalkResult::advance(); + }) + .wasInterrupted()); + + omp::SingleOperands operands; + operands.nowait = wsOp.getNowaitAttr(); + omp::SingleOp newOp = rootBuilder.create(loc, operands); + + newOp.getRegion().getBlocks().splice(newOp.getRegion().getBlocks().begin(), + wsOp.getRegion().getBlocks()); + wsOp->erase(); + } return success(); } From 4d20893efca8c9c87d42a0d5324b945dac118155 Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Fri, 4 Oct 2024 15:12:14 +0900 Subject: [PATCH 15/34] Fix lower workshare tests --- flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir | 4 ++-- .../Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir | 7 ++++--- flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir | 7 ++++--- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir b/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir index d1bef3a359e48..618b8d9c19b6b 100644 --- a/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir +++ b/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir @@ -24,8 +24,8 @@ func.func @wsfunc() { // CHECK-LABEL: func.func private @_workshare_copy_i32( // CHECK-SAME: %[[VAL_0:.*]]: !fir.ref, // CHECK-SAME: %[[VAL_1:.*]]: !fir.ref) { -// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref -// CHECK: fir.store %[[VAL_2]] to %[[VAL_1]] : !fir.ref +// CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_1]] : !fir.ref +// CHECK: fir.store %[[VAL_2]] to %[[VAL_0]] : !fir.ref // CHECK: return // CHECK: } diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir index d10996167ae62..62d9da6c520f8 100644 --- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir +++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir @@ -1,8 +1,9 @@ -// RUN: %not_todo_cmd fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s +// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s -// CHECK: not yet implemented: omp workshare with unstructured control flow +// CHECK: omp.parallel +// CHECK-NEXT: omp.single -// Check that the definition of %r dominates its use post-transform +// TODO Check that the definition of %r dominates its use post-transform func.func @wsfunc() { %a = fir.alloca i32 omp.parallel { diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir index 46d2a8e8d48a8..d9551eb99f076 100644 --- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir +++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir @@ -1,8 +1,9 @@ -// RUN: %not_todo_cmd fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s +// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s -// CHECK: not yet implemented: omp workshare with unstructured control flow +// CHECK: omp.parallel +// CHECK-NEXT: omp.single -// Check transforming a simple CFG +// TODO Check transforming a simple CFG func.func @wsfunc() { %a = fir.alloca i32 omp.parallel { From 5760383d7a4b491ea51dc4cfebdfd43d39bfba07 Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Fri, 4 Oct 2024 15:28:07 +0900 Subject: [PATCH 16/34] Different warning --- flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 9 +++++---- .../Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir | 2 ++ 
.../test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir | 2 ++ 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp index 34399abbcd20e..4d8e2a9a06714 100644 --- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp +++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp @@ -94,11 +94,9 @@ bool shouldUseWorkshareLowering(Operation *op) { if (isNestedIn(parentWorkshare, op)) return false; - if (parentWorkshare.getRegion().getBlocks().size() != 1) { - parentWorkshare->emitWarning( - "omp workshare with unstructured control flow currently unsupported."); + // Do not use workshare lowering until we support CFG in omp.workshare + if (parentWorkshare.getRegion().getBlocks().size() != 1) return false; - } return true; } @@ -448,6 +446,9 @@ LogicalResult lowerWorkshare(mlir::omp::WorkshareOp wsOp, DominanceInfo &di) { } else { // Otherwise just change the operation to an omp.single. + wsOp->emitWarning("omp workshare with unstructured control flow currently " + "unsupported and will be serialized."); + // `shouldUseWorkshareLowering` should have guaranteed that there are no // omp.workshare_loop_wrapper's that bind to this omp.workshare. assert(!wsOp->walk([&](Operation *op) { diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir index 62d9da6c520f8..96dc878bed0c9 100644 --- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir +++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir @@ -1,5 +1,7 @@ // RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s +// CHECK: warning: omp workshare with unstructured control flow currently unsupported and will be serialized. + // CHECK: omp.parallel // CHECK-NEXT: omp.single diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir index d9551eb99f076..ce8a4eb96982b 100644 --- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir +++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir @@ -1,5 +1,7 @@ // RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s +// CHECK: warning: omp workshare with unstructured control flow currently unsupported and will be serialized. + // CHECK: omp.parallel // CHECK-NEXT: omp.single From 71d13e248166908d98533b267fad16e4ff12caa2 Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Fri, 4 Oct 2024 22:45:09 +0900 Subject: [PATCH 17/34] Fix bug and add better clarification comments --- flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 28 ++++++++++++++++--- .../lower-workshare-correct-parallelize.mlir | 16 +++++++++++ 2 files changed, 40 insertions(+), 4 deletions(-) create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp index 4d8e2a9a06714..84cf5e8216798 100644 --- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp +++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include @@ -188,14 +189,19 @@ static bool isTransitivelyUsedOutside(Value v, SingleRegion sr) { if (isUserOutsideSR(user, parentOp, sr)) return true; - // Results of nested users cannot be used outside of the SR + // Now we know user is inside `sr`. + + // Results of nested users cannot be used outside of `sr`. 
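+    // Illustrative example (an assumption for exposition, using ops that
+    // appear in the tests of this series): with
+    //   %alloc   = fir.allocmem ...        // not safe to parallelize
+    //   %declare = fir.declare %alloc ...  // safe to parallelize
+    // both inside `sr`, a use of %declare after `sr` makes %alloc count as
+    // transitively used outside `sr` through %declare, even though every
+    // direct user of %alloc sits inside `sr`.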
if (user->getBlock() != srBlock) continue; - // A non-safe to parallelize operation will be handled separately + // A non-safe to parallelize operation will be checked for uses outside + // separately. if (!isSafeToParallelize(user)) continue; + // For safe to parallelize operations, we need to check if there is a + // transitive use of `v` through them. for (auto res : user->getResults()) if (isTransitivelyUsedOutside(res, sr)) return true; @@ -242,7 +248,21 @@ static void parallelizeRegion(Region &sourceRegion, Region &targetRegion, for (Operation &op : llvm::make_range(sr.begin, sr.end)) { if (isSafeToParallelize(&op)) { singleBuilder.clone(op, singleMapping); - parallelBuilder.clone(op, rootMapping); + if (llvm::all_of(op.getOperands(), [&](Value opr) { + return rootMapping.contains(opr); + })) { + // Safe to parallelize operations which have all operands available in + // the root parallel block can be executed there. + parallelBuilder.clone(op, rootMapping); + } else { + // If any operand was not available, it means that there was no + // transitive use of a non-safe-to-parallelize operation outside `sr`. + // This means that there should be no transitive uses outside `sr` of + // `op`. + assert(llvm::all_of(op.getResults(), [&](Value v) { + return !isTransitivelyUsedOutside(v, sr); + })); + } } else if (auto alloca = dyn_cast(&op)) { auto hoisted = cast(allocaBuilder.clone(*alloca, singleMapping)); @@ -252,7 +272,7 @@ static void parallelizeRegion(Region &sourceRegion, Region &targetRegion, } else { singleBuilder.clone(op, singleMapping); // Prepare reloaded values for results of operations that cannot be - // safely parallelized and which are used after the region `sr` + // safely parallelized and which are used after the region `sr`. for (auto res : op.getResults()) { if (isTransitivelyUsedOutside(res, sr)) { auto alloc = mapReloadedValue(res, allocaBuilder, singleBuilder, diff --git a/flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir b/flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir new file mode 100644 index 0000000000000..99ca4fe5a0e21 --- /dev/null +++ b/flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir @@ -0,0 +1,16 @@ +// RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s | FileCheck %s + +// Check that the safe to parallelize `fir.declare` op will not be parallelized +// due to its operand %alloc not being reloaded outside the omp.single. + +func.func @foo() { + %c0 = arith.constant 0 : index + omp.workshare { + %alloc = fir.allocmem !fir.array, %c0 {bindc_name = ".tmp.forall", uniq_name = ""} + %shape = fir.shape %c0 : (index) -> !fir.shape<1> + %declare = fir.declare %alloc(%shape) {uniq_name = ".tmp.forall"} : (!fir.heap>, !fir.shape<1>) -> !fir.heap> + fir.freemem %alloc : !fir.heap> + omp.terminator + } + return +} From 487305670f8fffc81c37a39635c08fdcad0734f9 Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Fri, 4 Oct 2024 22:48:42 +0900 Subject: [PATCH 18/34] Fix message --- flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp index 84cf5e8216798..a91f64f04a30a 100644 --- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp +++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp @@ -466,8 +466,9 @@ LogicalResult lowerWorkshare(mlir::omp::WorkshareOp wsOp, DominanceInfo &di) { } else { // Otherwise just change the operation to an omp.single. 
- wsOp->emitWarning("omp workshare with unstructured control flow currently " - "unsupported and will be serialized."); + wsOp->emitWarning( + "omp workshare with unstructured control flow is currently " + "unsupported and will be serialized."); // `shouldUseWorkshareLowering` should have guaranteed that there are no // omp.workshare_loop_wrapper's that bind to this omp.workshare. From 15f8d3da3fa4b468049752ec57d6b02362a12786 Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Sat, 5 Oct 2024 12:57:48 +0900 Subject: [PATCH 19/34] Fix tests --- flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 7 ++++++- .../OpenMP/lower-workshare-correct-parallelize.mlir | 9 +++++++++ .../Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir | 2 +- .../test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir | 2 +- 4 files changed, 17 insertions(+), 3 deletions(-) diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp index a91f64f04a30a..aa4371b3af6f7 100644 --- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp +++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp @@ -249,7 +249,12 @@ static void parallelizeRegion(Region &sourceRegion, Region &targetRegion, if (isSafeToParallelize(&op)) { singleBuilder.clone(op, singleMapping); if (llvm::all_of(op.getOperands(), [&](Value opr) { - return rootMapping.contains(opr); + // Either we have already remapped it + bool remapped = rootMapping.contains(opr); + // Or it is available because it dominates `sr` + bool dominates = + di.properlyDominates(opr.getDefiningOp(), &*sr.begin); + return remapped || dominates; })) { // Safe to parallelize operations which have all operands available in // the root parallel block can be executed there. diff --git a/flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir b/flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir index 99ca4fe5a0e21..31db8213b5f00 100644 --- a/flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir +++ b/flang/test/Transforms/OpenMP/lower-workshare-correct-parallelize.mlir @@ -14,3 +14,12 @@ func.func @foo() { } return } + +// CHECK: omp.single nowait +// CHECK: fir.allocmem +// CHECK: fir.shape +// CHECK: fir.declare +// CHECK: fir.freemem +// CHECK: omp.terminator +// CHECK: } +// CHECK: omp.barrier diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir index 96dc878bed0c9..83c49cd635d08 100644 --- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir +++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg-dom.mlir @@ -1,6 +1,6 @@ // RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s -// CHECK: warning: omp workshare with unstructured control flow currently unsupported and will be serialized. +// CHECK: warning: omp workshare with unstructured control flow is currently unsupported and will be serialized. // CHECK: omp.parallel // CHECK-NEXT: omp.single diff --git a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir index ce8a4eb96982b..a27cf88069401 100644 --- a/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir +++ b/flang/test/Transforms/OpenMP/lower-workshare-todo-cfg.mlir @@ -1,6 +1,6 @@ // RUN: fir-opt --lower-workshare --allow-unregistered-dialect %s 2>&1 | FileCheck %s -// CHECK: warning: omp workshare with unstructured control flow currently unsupported and will be serialized. 
+// CHECK: warning: omp workshare with unstructured control flow is currently unsupported and will be serialized. // CHECK: omp.parallel // CHECK-NEXT: omp.single From b52a6f9e3815c41a00afff1f987b6725a44f93cb Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Sat, 19 Oct 2024 23:32:27 +0900 Subject: [PATCH 20/34] Do not emit empty omp.single's --- flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 50 ++++++++++++------- .../OpenMP/lower-workshare-no-single.mlir | 20 ++++++++ 2 files changed, 52 insertions(+), 18 deletions(-) create mode 100644 flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp index aa4371b3af6f7..225c585a02d91 100644 --- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp +++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp @@ -239,11 +239,12 @@ static void parallelizeRegion(Region &sourceRegion, Region &targetRegion, return alloc; }; - auto moveToSingle = [&](SingleRegion sr, OpBuilder allocaBuilder, - OpBuilder singleBuilder, - OpBuilder parallelBuilder) -> SmallVector { + auto moveToSingle = + [&](SingleRegion sr, OpBuilder allocaBuilder, OpBuilder singleBuilder, + OpBuilder parallelBuilder) -> std::pair> { IRMapping singleMapping = rootMapping; SmallVector copyPrivate; + bool allParallelized = true; for (Operation &op : llvm::make_range(sr.begin, sr.end)) { if (isSafeToParallelize(&op)) { @@ -267,6 +268,7 @@ static void parallelizeRegion(Region &sourceRegion, Region &targetRegion, assert(llvm::all_of(op.getResults(), [&](Value v) { return !isTransitivelyUsedOutside(v, sr); })); + allParallelized = false; } } else if (auto alloca = dyn_cast(&op)) { auto hoisted = @@ -274,6 +276,7 @@ static void parallelizeRegion(Region &sourceRegion, Region &targetRegion, rootMapping.map(&*alloca, &*hoisted); rootMapping.map(alloca.getResult(), hoisted.getResult()); copyPrivate.push_back(hoisted); + allParallelized = false; } else { singleBuilder.clone(op, singleMapping); // Prepare reloaded values for results of operations that cannot be @@ -286,10 +289,11 @@ static void parallelizeRegion(Region &sourceRegion, Region &targetRegion, copyPrivate.push_back(alloc); } } + allParallelized = false; } } singleBuilder.create(loc); - return copyPrivate; + return {allParallelized, copyPrivate}; }; for (Block &block : sourceRegion) { @@ -343,25 +347,35 @@ static void parallelizeRegion(Region &sourceRegion, Region &targetRegion, Block *parallelBlock = new Block(); parallelBuilder.setInsertionPointToStart(parallelBlock); - omp::SingleOperands singleOperands; - if (isLast) - singleOperands.nowait = rootBuilder.getUnitAttr(); - singleOperands.copyprivateVars = + auto [allParallelized, copyprivateVars] = moveToSingle(std::get(opOrSingle), allocaBuilder, singleBuilder, parallelBuilder); - cleanupBlock(singleBlock); - for (auto var : singleOperands.copyprivateVars) { - mlir::func::FuncOp funcOp = - createCopyFunc(loc, var.getType(), firCopyFuncBuilder); - singleOperands.copyprivateSyms.push_back(SymbolRefAttr::get(funcOp)); + if (allParallelized) { + // The single region was not required as all operations were safe to + // parallelize + assert(copyprivateVars.empty()); + assert(allocaBlock->empty()); + delete singleBlock; + } else { + omp::SingleOperands singleOperands; + if (isLast) + singleOperands.nowait = rootBuilder.getUnitAttr(); + singleOperands.copyprivateVars = copyprivateVars; + cleanupBlock(singleBlock); + for (auto var : singleOperands.copyprivateVars) { + mlir::func::FuncOp 
funcOp = + createCopyFunc(loc, var.getType(), firCopyFuncBuilder); + singleOperands.copyprivateSyms.push_back( + SymbolRefAttr::get(funcOp)); + } + omp::SingleOp singleOp = + rootBuilder.create(loc, singleOperands); + singleOp.getRegion().push_back(singleBlock); + targetRegion.front().getOperations().splice( + singleOp->getIterator(), allocaBlock->getOperations()); } - omp::SingleOp singleOp = - rootBuilder.create(loc, singleOperands); - singleOp.getRegion().push_back(singleBlock); rootBuilder.getInsertionBlock()->getOperations().splice( rootBuilder.getInsertionPoint(), parallelBlock->getOperations()); - targetRegion.front().getOperations().splice( - singleOp->getIterator(), allocaBlock->getOperations()); delete allocaBlock; delete parallelBlock; } else { diff --git a/flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir b/flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir new file mode 100644 index 0000000000000..3e73816e63ace --- /dev/null +++ b/flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir @@ -0,0 +1,20 @@ +// RUN: fir-opt --split-input-file --lower-workshare --allow-unregistered-dialect %s | FileCheck %s + +// Check that we do not emit an omp.single for the constant operation + +func.func @foo() { + omp.workshare { + %c1 = arith.constant 1 : index + omp.workshare.loop_wrapper { + omp.loop_nest (%arg1) : index = (%c1) to (%c1) inclusive step (%c1) { + "test.test0"() : () -> () + omp.yield + } + omp.terminator + } + omp.terminator + } + return +} + +// CHECK-NOT: omp.single From 21128e7ec08a12c85832e13bc5ad6ca63d5a975e Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Sun, 20 Oct 2024 01:34:12 +0900 Subject: [PATCH 21/34] LowerWorkshare tests --- flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir | 2 -- flang/test/Transforms/OpenMP/lower-workshare-binding.mlir | 2 -- flang/test/Transforms/OpenMP/lower-workshare-cleanup.mlir | 2 -- flang/test/Transforms/OpenMP/lower-workshare-copyprivate.mlir | 1 - flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir | 1 - 5 files changed, 8 deletions(-) diff --git a/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir b/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir index 618b8d9c19b6b..12b0558d06ed5 100644 --- a/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir +++ b/flang/test/Transforms/OpenMP/lower-workshare-alloca.mlir @@ -13,7 +13,6 @@ func.func @wsfunc() { "test.test1"(%alloc) : (!fir.ref) -> () omp.yield } - omp.terminator } "test.test2"(%alloc) : (!fir.ref) -> () omp.terminator @@ -43,7 +42,6 @@ func.func @wsfunc() { // CHECK: "test.test1"(%[[VAL_0]]) : (!fir.ref) -> () // CHECK: omp.yield // CHECK: } -// CHECK: omp.terminator // CHECK: } // CHECK: omp.single nowait { // CHECK: "test.test2"(%[[VAL_0]]) : (!fir.ref) -> () diff --git a/flang/test/Transforms/OpenMP/lower-workshare-binding.mlir b/flang/test/Transforms/OpenMP/lower-workshare-binding.mlir index 48379470e9256..f1d0e8e229614 100644 --- a/flang/test/Transforms/OpenMP/lower-workshare-binding.mlir +++ b/flang/test/Transforms/OpenMP/lower-workshare-binding.mlir @@ -14,7 +14,6 @@ func.func @wsfunc() { "test.test2"() : () -> () omp.yield } - omp.terminator } omp.terminator } @@ -38,7 +37,6 @@ func.func @wsfunc() { // CHECK: "test.test2"() : () -> () // CHECK: omp.yield // CHECK: } -// CHECK: omp.terminator // CHECK: } // CHECK: omp.terminator // CHECK: } diff --git a/flang/test/Transforms/OpenMP/lower-workshare-cleanup.mlir b/flang/test/Transforms/OpenMP/lower-workshare-cleanup.mlir index 02fe90097008d..ca288917a3ac4 100644 
--- a/flang/test/Transforms/OpenMP/lower-workshare-cleanup.mlir +++ b/flang/test/Transforms/OpenMP/lower-workshare-cleanup.mlir @@ -21,7 +21,6 @@ // CHECK: "test.test2"() : () -> () // CHECK: omp.yield // CHECK: } -// CHECK: omp.terminator // CHECK: } // CHECK: omp.barrier // CHECK: omp.terminator @@ -47,7 +46,6 @@ func.func @wsfunc() { "test.test2"() : () -> () omp.yield } - omp.terminator } omp.terminator } diff --git a/flang/test/Transforms/OpenMP/lower-workshare-copyprivate.mlir b/flang/test/Transforms/OpenMP/lower-workshare-copyprivate.mlir index 5a3d583527fdd..d7a04e198ceed 100644 --- a/flang/test/Transforms/OpenMP/lower-workshare-copyprivate.mlir +++ b/flang/test/Transforms/OpenMP/lower-workshare-copyprivate.mlir @@ -59,7 +59,6 @@ func.func @wsfunc() { "test.test10"(%t6_mem_effect_use) : (i32) -> () omp.yield } - omp.terminator } "test.test10"(%t2) : (i32) -> () diff --git a/flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir b/flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir index 3e73816e63ace..1fd379a6e5eb4 100644 --- a/flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir +++ b/flang/test/Transforms/OpenMP/lower-workshare-no-single.mlir @@ -10,7 +10,6 @@ func.func @foo() { "test.test0"() : () -> () omp.yield } - omp.terminator } omp.terminator } From e62341dffa15e15c2e307fe45ef8c33233e63cc7 Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Sun, 20 Oct 2024 01:35:11 +0900 Subject: [PATCH 22/34] pipelines fix --- flang/include/flang/Optimizer/Passes/Pipelines.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/flang/include/flang/Optimizer/Passes/Pipelines.h b/flang/include/flang/Optimizer/Passes/Pipelines.h index 3b54ac3883858..55fafc2e6b36f 100644 --- a/flang/include/flang/Optimizer/Passes/Pipelines.h +++ b/flang/include/flang/Optimizer/Passes/Pipelines.h @@ -123,7 +123,8 @@ void createDefaultFIROptimizerPassPipeline(mlir::PassManager &pm, /// \param optLevel - optimization level used for creating FIR optimization /// passes pipeline void createHLFIRToFIRPassPipeline( - mlir::PassManager &pm, llvm::OptimizationLevel optLevel = defaultOptLevel); + mlir::PassManager &pm, bool enableOpenMP, + llvm::OptimizationLevel optLevel = defaultOptLevel); /// Create a pass pipeline for handling certain OpenMP transformations needed /// prior to FIR lowering. 
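With this change every caller of createHLFIRToFIRPassPipeline must now state explicitly whether OpenMP is enabled. A minimal sketch of what an updated call site could look like, assuming the caller already knows whether -fopenmp was requested (the corresponding driver changes are not part of this patch):

    mlir::PassManager pm(&context);
    // Assumed for illustration only; how the driver determines this is not
    // shown in this patch.
    bool enableOpenMP = true;
    createHLFIRToFIRPassPipeline(pm, enableOpenMP, llvm::OptimizationLevel::O2);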
From 688eead46bcec5a517050b1eb9aaa9bfe0f88285 Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Sun, 4 Aug 2024 17:33:52 +0900 Subject: [PATCH 23/34] Add workshare loop wrapper lowerings Bufferize test Bufferize test Bufferize test Add test for should use workshare lowering --- .../HLFIR/Transforms/BufferizeHLFIR.cpp | 4 +- .../Transforms/OptimizedBufferization.cpp | 10 +- flang/test/HLFIR/bufferize-workshare.fir | 58 ++++++++ .../OpenMP/should-use-workshare-lowering.mlir | 140 ++++++++++++++++++ 4 files changed, 208 insertions(+), 4 deletions(-) create mode 100644 flang/test/HLFIR/bufferize-workshare.fir create mode 100644 flang/test/Transforms/OpenMP/should-use-workshare-lowering.mlir diff --git a/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp b/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp index 07794828fce26..1848dbe2c7a2c 100644 --- a/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp +++ b/flang/lib/Optimizer/HLFIR/Transforms/BufferizeHLFIR.cpp @@ -26,6 +26,7 @@ #include "flang/Optimizer/HLFIR/HLFIRDialect.h" #include "flang/Optimizer/HLFIR/HLFIROps.h" #include "flang/Optimizer/HLFIR/Passes.h" +#include "flang/Optimizer/OpenMP/Passes.h" #include "mlir/Dialect/OpenMP/OpenMPDialect.h" #include "mlir/IR/Dominance.h" #include "mlir/IR/PatternMatch.h" @@ -792,7 +793,8 @@ struct ElementalOpConversion // Generate a loop nest looping around the fir.elemental shape and clone // fir.elemental region inside the inner loop. hlfir::LoopNest loopNest = - hlfir::genLoopNest(loc, builder, extents, !elemental.isOrdered()); + hlfir::genLoopNest(loc, builder, extents, !elemental.isOrdered(), + flangomp::shouldUseWorkshareLowering(elemental)); auto insPt = builder.saveInsertionPoint(); builder.setInsertionPointToStart(loopNest.body); auto yield = hlfir::inlineElementalOp(loc, builder, elemental, diff --git a/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp b/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp index 3a0a98dc59446..f014724861e33 100644 --- a/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp +++ b/flang/lib/Optimizer/HLFIR/Transforms/OptimizedBufferization.cpp @@ -20,6 +20,7 @@ #include "flang/Optimizer/HLFIR/HLFIRDialect.h" #include "flang/Optimizer/HLFIR/HLFIROps.h" #include "flang/Optimizer/HLFIR/Passes.h" +#include "flang/Optimizer/OpenMP/Passes.h" #include "flang/Optimizer/Transforms/Utils.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Dominance.h" @@ -482,7 +483,8 @@ llvm::LogicalResult ElementalAssignBufferization::matchAndRewrite( // Generate a loop nest looping around the hlfir.elemental shape and clone // hlfir.elemental region inside the inner loop hlfir::LoopNest loopNest = - hlfir::genLoopNest(loc, builder, extents, !elemental.isOrdered()); + hlfir::genLoopNest(loc, builder, extents, !elemental.isOrdered(), + flangomp::shouldUseWorkshareLowering(elemental)); builder.setInsertionPointToStart(loopNest.body); auto yield = hlfir::inlineElementalOp(loc, builder, elemental, loopNest.oneBasedIndices); @@ -553,7 +555,8 @@ llvm::LogicalResult BroadcastAssignBufferization::matchAndRewrite( llvm::SmallVector extents = hlfir::getIndexExtents(loc, builder, shape); hlfir::LoopNest loopNest = - hlfir::genLoopNest(loc, builder, extents, /*isUnordered=*/true); + hlfir::genLoopNest(loc, builder, extents, /*isUnordered=*/true, + flangomp::shouldUseWorkshareLowering(assign)); builder.setInsertionPointToStart(loopNest.body); auto arrayElement = hlfir::getElementAt(loc, builder, lhs, loopNest.oneBasedIndices); 
@@ -648,7 +651,8 @@ llvm::LogicalResult VariableAssignBufferization::matchAndRewrite( llvm::SmallVector extents = hlfir::getIndexExtents(loc, builder, shape); hlfir::LoopNest loopNest = - hlfir::genLoopNest(loc, builder, extents, /*isUnordered=*/true); + hlfir::genLoopNest(loc, builder, extents, /*isUnordered=*/true, + flangomp::shouldUseWorkshareLowering(assign)); builder.setInsertionPointToStart(loopNest.body); auto rhsArrayElement = hlfir::getElementAt(loc, builder, rhs, loopNest.oneBasedIndices); diff --git a/flang/test/HLFIR/bufferize-workshare.fir b/flang/test/HLFIR/bufferize-workshare.fir new file mode 100644 index 0000000000000..9b7341ae43398 --- /dev/null +++ b/flang/test/HLFIR/bufferize-workshare.fir @@ -0,0 +1,58 @@ +// RUN: fir-opt --bufferize-hlfir %s | FileCheck %s + +// CHECK-LABEL: func.func @simple( +// CHECK-SAME: %[[VAL_0:.*]]: !fir.ref>) { +// CHECK: omp.parallel { +// CHECK: omp.workshare { +// CHECK: %[[VAL_1:.*]] = arith.constant 42 : index +// CHECK: %[[VAL_2:.*]] = arith.constant 1 : i32 +// CHECK: %[[VAL_3:.*]] = fir.shape %[[VAL_1]] : (index) -> !fir.shape<1> +// CHECK: %[[VAL_4:.*]]:2 = hlfir.declare %[[VAL_0]](%[[VAL_3]]) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) +// CHECK: %[[VAL_5:.*]] = fir.allocmem !fir.array<42xi32> {bindc_name = ".tmp.array", uniq_name = ""} +// CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]](%[[VAL_3]]) {uniq_name = ".tmp.array"} : (!fir.heap>, !fir.shape<1>) -> (!fir.heap>, !fir.heap>) +// CHECK: %[[VAL_7:.*]] = arith.constant true +// CHECK: %[[VAL_8:.*]] = arith.constant 1 : index +// CHECK: omp.workshare.loop_wrapper { +// CHECK: omp.loop_nest (%[[VAL_9:.*]]) : index = (%[[VAL_8]]) to (%[[VAL_1]]) inclusive step (%[[VAL_8]]) { +// CHECK: %[[VAL_10:.*]] = hlfir.designate %[[VAL_4]]#0 (%[[VAL_9]]) : (!fir.ref>, index) -> !fir.ref +// CHECK: %[[VAL_11:.*]] = fir.load %[[VAL_10]] : !fir.ref +// CHECK: %[[VAL_12:.*]] = arith.subi %[[VAL_11]], %[[VAL_2]] : i32 +// CHECK: %[[VAL_13:.*]] = hlfir.designate %[[VAL_6]]#0 (%[[VAL_9]]) : (!fir.heap>, index) -> !fir.ref +// CHECK: hlfir.assign %[[VAL_12]] to %[[VAL_13]] temporary_lhs : i32, !fir.ref +// CHECK: omp.yield +// CHECK: } +// CHECK: omp.terminator +// CHECK: } +// CHECK: %[[VAL_14:.*]] = fir.undefined tuple>, i1> +// CHECK: %[[VAL_15:.*]] = fir.insert_value %[[VAL_14]], %[[VAL_7]], [1 : index] : (tuple>, i1>, i1) -> tuple>, i1> +// CHECK: %[[VAL_16:.*]] = fir.insert_value %[[VAL_15]], %[[VAL_6]]#0, [0 : index] : (tuple>, i1>, !fir.heap>) -> tuple>, i1> +// CHECK: hlfir.assign %[[VAL_6]]#0 to %[[VAL_4]]#0 : !fir.heap>, !fir.ref> +// CHECK: fir.freemem %[[VAL_6]]#0 : !fir.heap> +// CHECK: omp.terminator +// CHECK: } +// CHECK: omp.terminator +// CHECK: } +// CHECK: return +// CHECK: } +func.func @simple(%arg: !fir.ref>) { + omp.parallel { + omp.workshare { + %c42 = arith.constant 42 : index + %c1_i32 = arith.constant 1 : i32 + %shape = fir.shape %c42 : (index) -> !fir.shape<1> + %array:2 = hlfir.declare %arg(%shape) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) + %elemental = hlfir.elemental %shape unordered : (!fir.shape<1>) -> !hlfir.expr<42xi32> { + ^bb0(%i: index): + %ref = hlfir.designate %array#0 (%i) : (!fir.ref>, index) -> !fir.ref + %val = fir.load %ref : !fir.ref + %sub = arith.subi %val, %c1_i32 : i32 + hlfir.yield_element %sub : i32 + } + hlfir.assign %elemental to %array#0 : !hlfir.expr<42xi32>, !fir.ref> + hlfir.destroy %elemental : !hlfir.expr<42xi32> + omp.terminator + } + omp.terminator + } + return +} 
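The tests added below exercise flangomp::shouldUseWorkshareLowering, whose implementation is not part of this diff. As a rough sketch only, inferred from the expectations in should-use-workshare-lowering.mlir, the rule the tests encode is: lower to omp.workshare.loop_wrapper only when the nearest enclosing OpenMP construct is omp.workshare, and let any intervening omp.single, omp.critical or nested omp.parallel disable it.

    // Illustrative sketch; not the actual flangomp::shouldUseWorkshareLowering.
    static bool shouldUseWorkshareLoweringSketch(mlir::Operation *op) {
      for (mlir::Operation *parent = op->getParentOp(); parent;
           parent = parent->getParentOp()) {
        if (llvm::isa<mlir::omp::WorkshareOp>(parent))
          return true; // nearest enclosing OpenMP construct is the workshare
        if (llvm::isa<mlir::omp::SingleOp, mlir::omp::CriticalOp,
                      mlir::omp::ParallelOp>(parent))
          return false; // another construct intervenes before the workshare
      }
      return false; // not nested in an omp.workshare at all
    }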
diff --git a/flang/test/Transforms/OpenMP/should-use-workshare-lowering.mlir b/flang/test/Transforms/OpenMP/should-use-workshare-lowering.mlir new file mode 100644 index 0000000000000..229fe592a02b9 --- /dev/null +++ b/flang/test/Transforms/OpenMP/should-use-workshare-lowering.mlir @@ -0,0 +1,140 @@ +// RUN: fir-opt --bufferize-hlfir %s | FileCheck %s + +// Checks that we correctly identify when to use the lowering to +// omp.workshare.loop_wrapper + +// CHECK-LABEL: @should_parallelize_0 +// CHECK: omp.workshare.loop_wrapper +func.func @should_parallelize_0(%arg: !fir.ref>, %idx : index) { + omp.workshare { + %c42 = arith.constant 42 : index + %c1_i32 = arith.constant 1 : i32 + %shape = fir.shape %c42 : (index) -> !fir.shape<1> + %array:2 = hlfir.declare %arg(%shape) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) + %elemental = hlfir.elemental %shape unordered : (!fir.shape<1>) -> !hlfir.expr<42xi32> { + ^bb0(%i: index): + hlfir.yield_element %c1_i32 : i32 + } + hlfir.assign %elemental to %array#0 : !hlfir.expr<42xi32>, !fir.ref> + hlfir.destroy %elemental : !hlfir.expr<42xi32> + omp.terminator + } + return +} + +// CHECK-LABEL: @should_parallelize_1 +// CHECK: omp.workshare.loop_wrapper +func.func @should_parallelize_1(%arg: !fir.ref>, %idx : index) { + omp.parallel { + omp.workshare { + %c42 = arith.constant 42 : index + %c1_i32 = arith.constant 1 : i32 + %shape = fir.shape %c42 : (index) -> !fir.shape<1> + %array:2 = hlfir.declare %arg(%shape) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) + %elemental = hlfir.elemental %shape unordered : (!fir.shape<1>) -> !hlfir.expr<42xi32> { + ^bb0(%i: index): + hlfir.yield_element %c1_i32 : i32 + } + hlfir.assign %elemental to %array#0 : !hlfir.expr<42xi32>, !fir.ref> + hlfir.destroy %elemental : !hlfir.expr<42xi32> + omp.terminator + } + omp.terminator + } + return +} + + +// CHECK-LABEL: @should_not_parallelize_0 +// CHECK-NOT: omp.workshare.loop_wrapper +func.func @should_not_parallelize_0(%arg: !fir.ref>, %idx : index) { + omp.workshare { + omp.single { + %c42 = arith.constant 42 : index + %c1_i32 = arith.constant 1 : i32 + %shape = fir.shape %c42 : (index) -> !fir.shape<1> + %array:2 = hlfir.declare %arg(%shape) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) + %elemental = hlfir.elemental %shape unordered : (!fir.shape<1>) -> !hlfir.expr<42xi32> { + ^bb0(%i: index): + hlfir.yield_element %c1_i32 : i32 + } + hlfir.assign %elemental to %array#0 : !hlfir.expr<42xi32>, !fir.ref> + hlfir.destroy %elemental : !hlfir.expr<42xi32> + omp.terminator + } + omp.terminator + } + return +} + +// CHECK-LABEL: @should_not_parallelize_1 +// CHECK-NOT: omp.workshare.loop_wrapper +func.func @should_not_parallelize_1(%arg: !fir.ref>, %idx : index) { + omp.workshare { + omp.critical { + %c42 = arith.constant 42 : index + %c1_i32 = arith.constant 1 : i32 + %shape = fir.shape %c42 : (index) -> !fir.shape<1> + %array:2 = hlfir.declare %arg(%shape) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) + %elemental = hlfir.elemental %shape unordered : (!fir.shape<1>) -> !hlfir.expr<42xi32> { + ^bb0(%i: index): + hlfir.yield_element %c1_i32 : i32 + } + hlfir.assign %elemental to %array#0 : !hlfir.expr<42xi32>, !fir.ref> + hlfir.destroy %elemental : !hlfir.expr<42xi32> + omp.terminator + } + omp.terminator + } + return +} + +// CHECK-LABEL: @should_not_parallelize_2 +// CHECK-NOT: omp.workshare.loop_wrapper +func.func @should_not_parallelize_2(%arg: 
!fir.ref>, %idx : index) { + omp.workshare { + omp.parallel { + %c42 = arith.constant 42 : index + %c1_i32 = arith.constant 1 : i32 + %shape = fir.shape %c42 : (index) -> !fir.shape<1> + %array:2 = hlfir.declare %arg(%shape) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) + %elemental = hlfir.elemental %shape unordered : (!fir.shape<1>) -> !hlfir.expr<42xi32> { + ^bb0(%i: index): + hlfir.yield_element %c1_i32 : i32 + } + hlfir.assign %elemental to %array#0 : !hlfir.expr<42xi32>, !fir.ref> + hlfir.destroy %elemental : !hlfir.expr<42xi32> + omp.terminator + } + omp.terminator + } + return +} + +// CHECK-LABEL: @should_not_parallelize_3 +// CHECK-NOT: omp.workshare.loop_wrapper +func.func @should_not_parallelize_3(%arg: !fir.ref>, %idx : index) { + omp.workshare { + omp.parallel { + omp.workshare { + omp.parallel { + %c42 = arith.constant 42 : index + %c1_i32 = arith.constant 1 : i32 + %shape = fir.shape %c42 : (index) -> !fir.shape<1> + %array:2 = hlfir.declare %arg(%shape) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) + %elemental = hlfir.elemental %shape unordered : (!fir.shape<1>) -> !hlfir.expr<42xi32> { + ^bb0(%i: index): + hlfir.yield_element %c1_i32 : i32 + } + hlfir.assign %elemental to %array#0 : !hlfir.expr<42xi32>, !fir.ref> + hlfir.destroy %elemental : !hlfir.expr<42xi32> + omp.terminator + } + omp.terminator + } + omp.terminator + } + omp.terminator + } + return +} From 304ec01b7cfd6f0b1044567f54fbf780756be1cd Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Mon, 23 Sep 2024 12:56:11 +0900 Subject: [PATCH 24/34] Add integration test for workshare --- flang/test/Integration/OpenMP/workshare.f90 | 57 +++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 flang/test/Integration/OpenMP/workshare.f90 diff --git a/flang/test/Integration/OpenMP/workshare.f90 b/flang/test/Integration/OpenMP/workshare.f90 new file mode 100644 index 0000000000000..0c4524f855290 --- /dev/null +++ b/flang/test/Integration/OpenMP/workshare.f90 @@ -0,0 +1,57 @@ +!===----------------------------------------------------------------------===! +! This directory can be used to add Integration tests involving multiple +! stages of the compiler (for eg. from Fortran to LLVM IR). It should not +! contain executable tests. We should only add tests here sparingly and only +! if there is no other way to test. Repeat this message in each test that is +! added to this directory and sub-directories. +!===----------------------------------------------------------------------===! + +!RUN: %flang_fc1 -emit-hlfir -fopenmp -O3 %s -o - | FileCheck %s --check-prefix HLFIR +!RUN: %flang_fc1 -emit-fir -fopenmp -O3 %s -o - | FileCheck %s --check-prefix FIR + +subroutine sb1(a, x, y, z) + integer :: a + integer :: x(:) + integer :: y(:) + integer :: z(:) + !$omp parallel workshare + z = a * x + y + !$omp end parallel workshare +end subroutine + +! HLFIR: func.func @_QPsb1 +! HLFIR: omp.parallel { +! HLFIR: omp.workshare { +! HLFIR: hlfir.elemental {{.*}} unordered : (!fir.shape<1>) -> !hlfir.expr { +! HLFIR: hlfir.elemental {{.*}} unordered : (!fir.shape<1>) -> !hlfir.expr { +! HLFIR: hlfir.assign +! HLFIR: hlfir.destroy +! HLFIR: hlfir.destroy +! HLFIR-NOT: omp.barrier +! HLFIR: omp.terminator +! HLFIR: } +! HLFIR-NOT: omp.barrier +! HLFIR: omp.terminator +! HLFIR: } +! HLFIR: return +! HLFIR: } +! HLFIR:} + + +! FIR: func.func private @_workshare_copy_heap_Uxi32(%{{[a-z0-9]+}}: !fir.ref>>, %{{[a-z0-9]+}}: !fir.ref>> +! 
FIR: func.func private @_workshare_copy_i32(%{{[a-z0-9]+}}: !fir.ref, %{{[a-z0-9]+}}: !fir.ref + +! FIR: func.func @_QPsb1 +! FIR: omp.parallel { +! FIR: omp.single copyprivate(%9 -> @_workshare_copy_i32 : !fir.ref, %10 -> @_workshare_copy_heap_Uxi32 : !fir.ref>>) { +! FIR: fir.allocmem +! FIR: omp.wsloop { +! FIR: omp.loop_nest +! FIR: omp.single nowait { +! FIR: fir.call @_FortranAAssign +! FIR: fir.freemem +! FIR: omp.terminator +! FIR: } +! FIR: omp.barrier +! FIR: omp.terminator +! FIR: } From 754d54b54822b2877329b4c9be3a5924f634189a Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Fri, 4 Oct 2024 15:02:54 +0900 Subject: [PATCH 25/34] One more integration test --- .../OpenMP/workshare-scalar-array-mul.f90 | 67 +++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 flang/test/Integration/OpenMP/workshare-scalar-array-mul.f90 diff --git a/flang/test/Integration/OpenMP/workshare-scalar-array-mul.f90 b/flang/test/Integration/OpenMP/workshare-scalar-array-mul.f90 new file mode 100644 index 0000000000000..2fb9a029bf93a --- /dev/null +++ b/flang/test/Integration/OpenMP/workshare-scalar-array-mul.f90 @@ -0,0 +1,67 @@ +!===----------------------------------------------------------------------===! +! This directory can be used to add Integration tests involving multiple +! stages of the compiler (for eg. from Fortran to LLVM IR). It should not +! contain executable tests. We should only add tests here sparingly and only +! if there is no other way to test. Repeat this message in each test that is +! added to this directory and sub-directories. +!===----------------------------------------------------------------------===! + +!RUN: %flang_fc1 -emit-hlfir -fopenmp -O3 %s -o - | FileCheck %s --check-prefix HLFIR-O3 +!RUN: %flang_fc1 -emit-fir -fopenmp -O3 %s -o - | FileCheck %s --check-prefix FIR-O3 + +!RUN: %flang_fc1 -emit-hlfir -fopenmp -O0 %s -o - | FileCheck %s --check-prefix HLFIR-O0 +!RUN: %flang_fc1 -emit-fir -fopenmp -O0 %s -o - | FileCheck %s --check-prefix FIR-O0 + +program test + real :: arr_01(10) + !$omp parallel workshare + arr_01 = arr_01*2 + !$omp end parallel workshare +end program + +! HLFIR-O3: omp.parallel { +! HLFIR-O3: omp.workshare { +! HLFIR-O3: hlfir.elemental +! HLFIR-O3: hlfir.assign +! HLFIR-O3: hlfir.destroy +! HLFIR-O3: omp.terminator +! HLFIR-O3: omp.terminator + +! FIR-O3: omp.parallel { +! FIR-O3: omp.wsloop nowait { +! FIR-O3: omp.loop_nest +! FIR-O3: omp.terminator +! FIR-O3: omp.barrier +! FIR-O3: omp.terminator + +! HLFIR-O0: omp.parallel { +! HLFIR-O0: omp.workshare { +! HLFIR-O0: hlfir.elemental +! HLFIR-O0: hlfir.assign +! HLFIR-O0: hlfir.destroy +! HLFIR-O0: omp.terminator +! HLFIR-O0: omp.terminator + +! Check the copyprivate copy function +! FIR-O0: func.func private @_workshare_copy_heap_{{.*}}(%[[DST:.*]]: {{.*}}, %[[SRC:.*]]: {{.*}}) +! FIR-O0: fir.load %[[SRC]] +! FIR-O0: fir.store {{.*}} to %[[DST]] + +! Check that we properly handle the temporary array +! FIR-O0: omp.parallel { +! FIR-O0: %[[CP:.*]] = fir.alloca !fir.heap> +! FIR-O0: omp.single copyprivate(%[[CP]] -> @_workshare_copy_heap_ +! FIR-O0: fir.allocmem +! FIR-O0: fir.store +! FIR-O0: omp.terminator +! FIR-O0: fir.load %[[CP]] +! FIR-O0: omp.wsloop { +! FIR-O0: omp.loop_nest +! FIR-O0: omp.yield +! FIR-O0: omp.terminator +! FIR-O0: omp.single nowait { +! FIR-O0: fir.call @_FortranAAssign +! FIR-O0: fir.freemem +! FIR-O0: omp.terminator +! FIR-O0: omp.barrier +! 
FIR-O0: omp.terminator From fed4884e8e55b0eece7e79a84910f32ecc876e42 Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Fri, 4 Oct 2024 15:12:43 +0900 Subject: [PATCH 26/34] Add test for cfg workshare bufferization --- .../should-use-workshare-lowering-cfg.mlir | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 flang/test/Transforms/OpenMP/should-use-workshare-lowering-cfg.mlir diff --git a/flang/test/Transforms/OpenMP/should-use-workshare-lowering-cfg.mlir b/flang/test/Transforms/OpenMP/should-use-workshare-lowering-cfg.mlir new file mode 100644 index 0000000000000..8b6d8097caad8 --- /dev/null +++ b/flang/test/Transforms/OpenMP/should-use-workshare-lowering-cfg.mlir @@ -0,0 +1,22 @@ +// RUN: fir-opt --bufferize-hlfir %s 2>&1 | FileCheck %s + +// CHECK: warning: omp workshare with unstructured control flow currently unsupported. +func.func @warn_cfg(%arg: !fir.ref>, %idx : index) { + omp.workshare { + ^bb1: + %c42 = arith.constant 42 : index + %c1_i32 = arith.constant 1 : i32 + %shape = fir.shape %c42 : (index) -> !fir.shape<1> + %array:2 = hlfir.declare %arg(%shape) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) + %elemental = hlfir.elemental %shape unordered : (!fir.shape<1>) -> !hlfir.expr<42xi32> { + ^bb0(%i: index): + hlfir.yield_element %c1_i32 : i32 + } + hlfir.assign %elemental to %array#0 : !hlfir.expr<42xi32>, !fir.ref> + hlfir.destroy %elemental : !hlfir.expr<42xi32> + cf.br ^bb2 + ^bb2: + omp.terminator + } + return +} From 99d20f9736fc09eb4e2ce9546f7fcd99751ace2a Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Fri, 4 Oct 2024 15:24:46 +0900 Subject: [PATCH 27/34] Fix tests --- .../should-use-workshare-lowering-cfg.mlir | 22 ------------------- .../OpenMP/should-use-workshare-lowering.mlir | 22 +++++++++++++++++++ 2 files changed, 22 insertions(+), 22 deletions(-) delete mode 100644 flang/test/Transforms/OpenMP/should-use-workshare-lowering-cfg.mlir diff --git a/flang/test/Transforms/OpenMP/should-use-workshare-lowering-cfg.mlir b/flang/test/Transforms/OpenMP/should-use-workshare-lowering-cfg.mlir deleted file mode 100644 index 8b6d8097caad8..0000000000000 --- a/flang/test/Transforms/OpenMP/should-use-workshare-lowering-cfg.mlir +++ /dev/null @@ -1,22 +0,0 @@ -// RUN: fir-opt --bufferize-hlfir %s 2>&1 | FileCheck %s - -// CHECK: warning: omp workshare with unstructured control flow currently unsupported. 
-func.func @warn_cfg(%arg: !fir.ref>, %idx : index) { - omp.workshare { - ^bb1: - %c42 = arith.constant 42 : index - %c1_i32 = arith.constant 1 : i32 - %shape = fir.shape %c42 : (index) -> !fir.shape<1> - %array:2 = hlfir.declare %arg(%shape) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) - %elemental = hlfir.elemental %shape unordered : (!fir.shape<1>) -> !hlfir.expr<42xi32> { - ^bb0(%i: index): - hlfir.yield_element %c1_i32 : i32 - } - hlfir.assign %elemental to %array#0 : !hlfir.expr<42xi32>, !fir.ref> - hlfir.destroy %elemental : !hlfir.expr<42xi32> - cf.br ^bb2 - ^bb2: - omp.terminator - } - return -} diff --git a/flang/test/Transforms/OpenMP/should-use-workshare-lowering.mlir b/flang/test/Transforms/OpenMP/should-use-workshare-lowering.mlir index 229fe592a02b9..91b08123cce42 100644 --- a/flang/test/Transforms/OpenMP/should-use-workshare-lowering.mlir +++ b/flang/test/Transforms/OpenMP/should-use-workshare-lowering.mlir @@ -138,3 +138,25 @@ func.func @should_not_parallelize_3(%arg: !fir.ref>, %idx : i } return } + +// CHECK-LABEL: @should_not_parallelize_4 +// CHECK-NOT: omp.workshare.loop_wrapper +func.func @should_not_parallelize_4(%arg: !fir.ref>, %idx : index) { + omp.workshare { + ^bb1: + %c42 = arith.constant 42 : index + %c1_i32 = arith.constant 1 : i32 + %shape = fir.shape %c42 : (index) -> !fir.shape<1> + %array:2 = hlfir.declare %arg(%shape) {uniq_name = "array"} : (!fir.ref>, !fir.shape<1>) -> (!fir.ref>, !fir.ref>) + %elemental = hlfir.elemental %shape unordered : (!fir.shape<1>) -> !hlfir.expr<42xi32> { + ^bb0(%i: index): + hlfir.yield_element %c1_i32 : i32 + } + hlfir.assign %elemental to %array#0 : !hlfir.expr<42xi32>, !fir.ref> + hlfir.destroy %elemental : !hlfir.expr<42xi32> + cf.br ^bb2 + ^bb2: + omp.terminator + } + return +} From d234efec26d9d7882975c4ef90e9eceff2b45a26 Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Sat, 19 Oct 2024 23:30:42 +0900 Subject: [PATCH 28/34] Test coverage for all changes --- .../OpenMP/workshare-array-array-assign.f90 | 35 ++++++++++++++ .../{workshare.f90 => workshare-axpy.f90} | 0 .../OpenMP/workshare-scalar-array-assign.f90 | 46 +++++++++++++++++++ 3 files changed, 81 insertions(+) create mode 100644 flang/test/Integration/OpenMP/workshare-array-array-assign.f90 rename flang/test/Integration/OpenMP/{workshare.f90 => workshare-axpy.f90} (100%) create mode 100644 flang/test/Integration/OpenMP/workshare-scalar-array-assign.f90 diff --git a/flang/test/Integration/OpenMP/workshare-array-array-assign.f90 b/flang/test/Integration/OpenMP/workshare-array-array-assign.f90 new file mode 100644 index 0000000000000..065f72d5d72d8 --- /dev/null +++ b/flang/test/Integration/OpenMP/workshare-array-array-assign.f90 @@ -0,0 +1,35 @@ +!===----------------------------------------------------------------------===! +! This directory can be used to add Integration tests involving multiple +! stages of the compiler (for eg. from Fortran to LLVM IR). It should not +! contain executable tests. We should only add tests here sparingly and only +! if there is no other way to test. Repeat this message in each test that is +! added to this directory and sub-directories. +!===----------------------------------------------------------------------===! 
+ +!RUN: %flang_fc1 -emit-hlfir -fopenmp -O3 %s -o - | FileCheck %s --check-prefix HLFIR +!RUN: %flang_fc1 -emit-fir -fopenmp -O3 %s -o - | FileCheck %s --check-prefix FIR + +subroutine sb1(x, y) + integer :: x(:) + integer :: y(:) + !$omp parallel workshare + x = y + !$omp end parallel workshare +end subroutine + +! HLFIR: omp.parallel { +! HLFIR: omp.workshare { +! HLFIR: hlfir.assign +! HLFIR: omp.terminator +! HLFIR: } +! HLFIR: omp.terminator +! HLFIR: } + +! FIR: omp.parallel { +! FIR: omp.wsloop nowait { +! FIR: omp.loop_nest +! FIR: omp.terminator +! FIR: } +! FIR: omp.barrier +! FIR: omp.terminator +! FIR: } diff --git a/flang/test/Integration/OpenMP/workshare.f90 b/flang/test/Integration/OpenMP/workshare-axpy.f90 similarity index 100% rename from flang/test/Integration/OpenMP/workshare.f90 rename to flang/test/Integration/OpenMP/workshare-axpy.f90 diff --git a/flang/test/Integration/OpenMP/workshare-scalar-array-assign.f90 b/flang/test/Integration/OpenMP/workshare-scalar-array-assign.f90 new file mode 100644 index 0000000000000..fad1af110792b --- /dev/null +++ b/flang/test/Integration/OpenMP/workshare-scalar-array-assign.f90 @@ -0,0 +1,46 @@ +!===----------------------------------------------------------------------===! +! This directory can be used to add Integration tests involving multiple +! stages of the compiler (for eg. from Fortran to LLVM IR). It should not +! contain executable tests. We should only add tests here sparingly and only +! if there is no other way to test. Repeat this message in each test that is +! added to this directory and sub-directories. +!===----------------------------------------------------------------------===! + +!RUN: %flang_fc1 -emit-hlfir -fopenmp -O3 %s -o - | FileCheck %s --check-prefix HLFIR +!RUN: %flang_fc1 -emit-fir -fopenmp -O3 %s -o - | FileCheck %s --check-prefix FIR + +subroutine sb1(a, x) + integer :: a + integer :: x(:) + !$omp parallel workshare + x = a + !$omp end parallel workshare +end subroutine + +! HLFIR: omp.parallel { +! HLFIR: omp.workshare { +! HLFIR: %[[SCALAR:.*]] = fir.load %1#0 : !fir.ref +! HLFIR: hlfir.assign %[[SCALAR]] to +! HLFIR: omp.terminator +! HLFIR: } +! HLFIR: omp.terminator +! HLFIR: } + +! FIR: omp.parallel { +! FIR: %[[SCALAR_ALLOCA:.*]] = fir.alloca i32 +! FIR: omp.single copyprivate(%[[SCALAR_ALLOCA]] -> @_workshare_copy_i32 : !fir.ref) { +! FIR: %[[SCALAR_LOAD:.*]] = fir.load %{{.*}} : !fir.ref +! FIR: fir.store %[[SCALAR_LOAD]] to %[[SCALAR_ALLOCA]] : !fir.ref +! FIR: omp.terminator +! FIR: } +! FIR: %[[SCALAR_RELOAD:.*]] = fir.load %[[SCALAR_ALLOCA]] : !fir.ref +! FIR: %6:3 = fir.box_dims %3, %c0 : (!fir.box>, index) -> (index, index, index) +! FIR: omp.wsloop nowait { +! FIR: omp.loop_nest (%arg2) : index = (%c1) to (%6#1) inclusive step (%c1) { +! FIR: fir.store %[[SCALAR_RELOAD]] +! FIR: omp.yield +! FIR: } +! FIR: omp.terminator +! FIR: } +! FIR: omp.barrier +! 
FIR: omp.terminator From d35b0c83b96f659e54ad6de700eaed591e43ea90 Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Sun, 20 Oct 2024 01:34:31 +0900 Subject: [PATCH 29/34] Integration tests --- flang/test/Integration/OpenMP/workshare-array-array-assign.f90 | 1 - flang/test/Integration/OpenMP/workshare-scalar-array-assign.f90 | 1 - flang/test/Integration/OpenMP/workshare-scalar-array-mul.f90 | 2 -- 3 files changed, 4 deletions(-) diff --git a/flang/test/Integration/OpenMP/workshare-array-array-assign.f90 b/flang/test/Integration/OpenMP/workshare-array-array-assign.f90 index 065f72d5d72d8..e9ec5d9175beb 100644 --- a/flang/test/Integration/OpenMP/workshare-array-array-assign.f90 +++ b/flang/test/Integration/OpenMP/workshare-array-array-assign.f90 @@ -28,7 +28,6 @@ subroutine sb1(x, y) ! FIR: omp.parallel { ! FIR: omp.wsloop nowait { ! FIR: omp.loop_nest -! FIR: omp.terminator ! FIR: } ! FIR: omp.barrier ! FIR: omp.terminator diff --git a/flang/test/Integration/OpenMP/workshare-scalar-array-assign.f90 b/flang/test/Integration/OpenMP/workshare-scalar-array-assign.f90 index fad1af110792b..6c180cd639997 100644 --- a/flang/test/Integration/OpenMP/workshare-scalar-array-assign.f90 +++ b/flang/test/Integration/OpenMP/workshare-scalar-array-assign.f90 @@ -40,7 +40,6 @@ subroutine sb1(a, x) ! FIR: fir.store %[[SCALAR_RELOAD]] ! FIR: omp.yield ! FIR: } -! FIR: omp.terminator ! FIR: } ! FIR: omp.barrier ! FIR: omp.terminator diff --git a/flang/test/Integration/OpenMP/workshare-scalar-array-mul.f90 b/flang/test/Integration/OpenMP/workshare-scalar-array-mul.f90 index 2fb9a029bf93a..9b8ef66b48f47 100644 --- a/flang/test/Integration/OpenMP/workshare-scalar-array-mul.f90 +++ b/flang/test/Integration/OpenMP/workshare-scalar-array-mul.f90 @@ -30,7 +30,6 @@ program test ! FIR-O3: omp.parallel { ! FIR-O3: omp.wsloop nowait { ! FIR-O3: omp.loop_nest -! FIR-O3: omp.terminator ! FIR-O3: omp.barrier ! FIR-O3: omp.terminator @@ -58,7 +57,6 @@ program test ! FIR-O0: omp.wsloop { ! FIR-O0: omp.loop_nest ! FIR-O0: omp.yield -! FIR-O0: omp.terminator ! FIR-O0: omp.single nowait { ! FIR-O0: fir.call @_FortranAAssign ! 
FIR-O0: fir.freemem From b76551be57566e934936606ee512ce21fb06f0f7 Mon Sep 17 00:00:00 2001 From: Ivan Radanov Ivanov Date: Sun, 20 Oct 2024 01:35:36 +0900 Subject: [PATCH 30/34] bufferize fix --- flang/test/HLFIR/bufferize-workshare.fir | 1 - 1 file changed, 1 deletion(-) diff --git a/flang/test/HLFIR/bufferize-workshare.fir b/flang/test/HLFIR/bufferize-workshare.fir index 9b7341ae43398..af5abb381937e 100644 --- a/flang/test/HLFIR/bufferize-workshare.fir +++ b/flang/test/HLFIR/bufferize-workshare.fir @@ -21,7 +21,6 @@ // CHECK: hlfir.assign %[[VAL_12]] to %[[VAL_13]] temporary_lhs : i32, !fir.ref // CHECK: omp.yield // CHECK: } -// CHECK: omp.terminator // CHECK: } // CHECK: %[[VAL_14:.*]] = fir.undefined tuple>, i1> // CHECK: %[[VAL_15:.*]] = fir.insert_value %[[VAL_14]], %[[VAL_7]], [1 : index] : (tuple>, i1>, i1) -> tuple>, i1> From 3a48a370d212f50f86eb017afeb2f4d8dbf9bd3e Mon Sep 17 00:00:00 2001 From: Thirumalai-Shaktivel Date: Thu, 3 Oct 2024 10:08:41 +0000 Subject: [PATCH 31/34] [CMake] Fix build failure --- flang/lib/Optimizer/HLFIR/Transforms/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/flang/lib/Optimizer/HLFIR/Transforms/CMakeLists.txt b/flang/lib/Optimizer/HLFIR/Transforms/CMakeLists.txt index fa3a59303137f..43da1eb92d030 100644 --- a/flang/lib/Optimizer/HLFIR/Transforms/CMakeLists.txt +++ b/flang/lib/Optimizer/HLFIR/Transforms/CMakeLists.txt @@ -26,6 +26,7 @@ add_flang_library(HLFIRTransforms FIRTransforms HLFIRDialect MLIRIR + FlangOpenMPTransforms ${dialect_libs} LINK_COMPONENTS From 52b71419e758a1123e429c4df259a1aff6523b0c Mon Sep 17 00:00:00 2001 From: Thirumalai-Shaktivel Date: Sun, 20 Oct 2024 09:16:18 +0000 Subject: [PATCH 32/34] [Flang][Pass] Move LowerWorkshare pass before LowerHLFIRIntrinsics pass --- flang/lib/Optimizer/Passes/Pipelines.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flang/lib/Optimizer/Passes/Pipelines.cpp b/flang/lib/Optimizer/Passes/Pipelines.cpp index c1a5902b74788..3a9166f2e0aa5 100644 --- a/flang/lib/Optimizer/Passes/Pipelines.cpp +++ b/flang/lib/Optimizer/Passes/Pipelines.cpp @@ -227,11 +227,11 @@ void createHLFIRToFIRPassPipeline(mlir::PassManager &pm, bool enableOpenMP, hlfir::createOptimizedBufferization); } pm.addPass(hlfir::createLowerHLFIROrderedAssignments()); + if (enableOpenMP) + pm.addPass(flangomp::createLowerWorkshare()); pm.addPass(hlfir::createLowerHLFIRIntrinsics()); pm.addPass(hlfir::createBufferizeHLFIR()); pm.addPass(hlfir::createConvertHLFIRtoFIR()); - if (enableOpenMP) - pm.addPass(flangomp::createLowerWorkshare()); } /// Create a pass pipeline for handling certain OpenMP transformations needed From 2236526ce084e30039a8ff74251ea796868ae712 Mon Sep 17 00:00:00 2001 From: Thirumalai-Shaktivel Date: Sun, 20 Oct 2024 09:21:50 +0000 Subject: [PATCH 33/34] [NFC][Flang][Pass] Add SUM intrinsic operations inside the workshare construct --- flang/lib/Optimizer/OpenMP/CMakeLists.txt | 1 + flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp | 151 ++++++++++++++---- 2 files changed, 117 insertions(+), 35 deletions(-) diff --git a/flang/lib/Optimizer/OpenMP/CMakeLists.txt b/flang/lib/Optimizer/OpenMP/CMakeLists.txt index 39e92d388288d..776798d723911 100644 --- a/flang/lib/Optimizer/OpenMP/CMakeLists.txt +++ b/flang/lib/Optimizer/OpenMP/CMakeLists.txt @@ -21,6 +21,7 @@ add_flang_library(FlangOpenMPTransforms FortranCommon MLIRFuncDialect MLIROpenMPDialect + MLIRArithDialect HLFIRDialect MLIRIR MLIRPass diff --git a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp 
b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp index 225c585a02d91..9fb84f3e099c4 100644 --- a/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp +++ b/flang/lib/Optimizer/OpenMP/LowerWorkshare.cpp @@ -16,6 +16,7 @@ // //===----------------------------------------------------------------------===// +#include "flang/Optimizer/Builder/HLFIRTools.h" #include #include #include @@ -335,49 +336,129 @@ static void parallelizeRegion(Region &sourceRegion, Region &targetRegion, for (auto [i, opOrSingle] : llvm::enumerate(regions)) { bool isLast = i + 1 == regions.size(); if (std::holds_alternative(opOrSingle)) { - OpBuilder singleBuilder(sourceRegion.getContext()); - Block *singleBlock = new Block(); - singleBuilder.setInsertionPointToStart(singleBlock); - OpBuilder allocaBuilder(sourceRegion.getContext()); Block *allocaBlock = new Block(); allocaBuilder.setInsertionPointToStart(allocaBlock); - OpBuilder parallelBuilder(sourceRegion.getContext()); - Block *parallelBlock = new Block(); - parallelBuilder.setInsertionPointToStart(parallelBlock); - - auto [allParallelized, copyprivateVars] = - moveToSingle(std::get(opOrSingle), allocaBuilder, - singleBuilder, parallelBuilder); - if (allParallelized) { - // The single region was not required as all operations were safe to - // parallelize - assert(copyprivateVars.empty()); - assert(allocaBlock->empty()); - delete singleBlock; + it = block.begin(); + while (&*it != terminator) + if (isa(it)) + break; + else + it++; + + if (auto sumOp = dyn_cast(it)) { + /// Implementation: + /// Intrinsic function `SUM` operations + /// -- + /// x = sum(array) + /// + /// is converted to + /// + /// !$omp parallel do + /// do i = 1, size(array) + /// x = x + array(i) + /// end do + /// !$omp end parallel do + + OpBuilder wslBuilder(sourceRegion.getContext()); + Block *wslBlock = new Block(); + wslBuilder.setInsertionPointToStart(wslBlock); + + Value target = dyn_cast(++it).getLhs(); + Value array = sumOp.getArray(); + Value dim = sumOp.getDim(); + fir::SequenceType arrayTy = dyn_cast( + hlfir::getFortranElementOrSequenceType(array.getType())); + llvm::ArrayRef arrayShape = arrayTy.getShape(); + if (arrayShape.size() == 1 && !dim) { + Value itr = allocaBuilder.create( + loc, allocaBuilder.getI64Type()); + Value c_one = allocaBuilder.create( + loc, allocaBuilder.getI64IntegerAttr(1)); + Value c_arr_size = allocaBuilder.create( + loc, allocaBuilder.getI64IntegerAttr(arrayShape[0])); + // Value c_zero = allocaBuilder.create(loc, + // allocaBuilder.getZeroAttr(arrayTy.getEleTy())); + // allocaBuilder.create(loc, c_zero, target); + + omp::WsloopOperands wslOps; + omp::WsloopOp wslOp = + rootBuilder.create(loc, wslOps); + + hlfir::LoopNest ln; + ln.outerOp = wslOp; + omp::LoopNestOperands lnOps; + lnOps.loopLowerBounds.push_back(c_one); + lnOps.loopUpperBounds.push_back(c_arr_size); + lnOps.loopSteps.push_back(c_one); + lnOps.loopInclusive = wslBuilder.getUnitAttr(); + omp::LoopNestOp lnOp = + wslBuilder.create(loc, lnOps); + Block *lnBlock = wslBuilder.createBlock(&lnOp.getRegion()); + lnBlock->addArgument(c_one.getType(), loc); + wslBuilder.create( + loc, lnOp.getRegion().getArgument(0), itr); + Value tarLoad = wslBuilder.create(loc, target); + Value itrLoad = wslBuilder.create(loc, itr); + hlfir::DesignateOp arrDesOp = wslBuilder.create( + loc, fir::ReferenceType::get(arrayTy.getEleTy()), array, + itrLoad); + Value desLoad = wslBuilder.create(loc, arrDesOp); + Value addf = + wslBuilder.create(loc, tarLoad, desLoad); + wslBuilder.create(loc, addf, target); + 
wslBuilder.create(loc); + ln.body = lnBlock; + wslOp.getRegion().push_back(wslBlock); + targetRegion.front().getOperations().splice( + wslOp->getIterator(), allocaBlock->getOperations()); + } else { + emitError(loc, "Only 1D array scalar assignment for sum " + "instrinsic is supported in workshare construct"); + return; + } } else { - omp::SingleOperands singleOperands; - if (isLast) - singleOperands.nowait = rootBuilder.getUnitAttr(); - singleOperands.copyprivateVars = copyprivateVars; - cleanupBlock(singleBlock); - for (auto var : singleOperands.copyprivateVars) { - mlir::func::FuncOp funcOp = - createCopyFunc(loc, var.getType(), firCopyFuncBuilder); - singleOperands.copyprivateSyms.push_back( - SymbolRefAttr::get(funcOp)); + OpBuilder singleBuilder(sourceRegion.getContext()); + Block *singleBlock = new Block(); + singleBuilder.setInsertionPointToStart(singleBlock); + + OpBuilder parallelBuilder(sourceRegion.getContext()); + Block *parallelBlock = new Block(); + parallelBuilder.setInsertionPointToStart(parallelBlock); + + auto [allParallelized, copyprivateVars] = + moveToSingle(std::get(opOrSingle), allocaBuilder, + singleBuilder, parallelBuilder); + if (allParallelized) { + // The single region was not required as all operations were safe to + // parallelize + assert(copyprivateVars.empty()); + assert(allocaBlock->empty()); + delete singleBlock; + } else { + omp::SingleOperands singleOperands; + if (isLast) + singleOperands.nowait = rootBuilder.getUnitAttr(); + singleOperands.copyprivateVars = copyprivateVars; + cleanupBlock(singleBlock); + for (auto var : singleOperands.copyprivateVars) { + mlir::func::FuncOp funcOp = + createCopyFunc(loc, var.getType(), firCopyFuncBuilder); + singleOperands.copyprivateSyms.push_back( + SymbolRefAttr::get(funcOp)); + } + omp::SingleOp singleOp = + rootBuilder.create(loc, singleOperands); + singleOp.getRegion().push_back(singleBlock); + targetRegion.front().getOperations().splice( + singleOp->getIterator(), allocaBlock->getOperations()); } - omp::SingleOp singleOp = - rootBuilder.create(loc, singleOperands); - singleOp.getRegion().push_back(singleBlock); - targetRegion.front().getOperations().splice( - singleOp->getIterator(), allocaBlock->getOperations()); + rootBuilder.getInsertionBlock()->getOperations().splice( + rootBuilder.getInsertionPoint(), parallelBlock->getOperations()); + delete parallelBlock; } - rootBuilder.getInsertionBlock()->getOperations().splice( - rootBuilder.getInsertionPoint(), parallelBlock->getOperations()); delete allocaBlock; - delete parallelBlock; } else { auto op = std::get(opOrSingle); if (auto wslw = dyn_cast(op)) { From 6c9d9f5215babedfb1d45719a7a763f2726b3eaf Mon Sep 17 00:00:00 2001 From: Thirumalai-Shaktivel Date: Sun, 20 Oct 2024 09:32:42 +0000 Subject: [PATCH 34/34] [Test] Add and update tests --- flang/test/Fir/basic-program.fir | 2 +- flang/test/Integration/OpenMP/workshare02.f90 | 36 +++++++++++++++++++ 2 files changed, 37 insertions(+), 1 deletion(-) create mode 100644 flang/test/Integration/OpenMP/workshare02.f90 diff --git a/flang/test/Fir/basic-program.fir b/flang/test/Fir/basic-program.fir index 4b18acb7c2b43..9b9651a476e58 100644 --- a/flang/test/Fir/basic-program.fir +++ b/flang/test/Fir/basic-program.fir @@ -44,10 +44,10 @@ func.func @_QQmain() { // PASSES-NEXT: 'omp.private' Pipeline // PASSES-NEXT: OptimizedBufferization // PASSES-NEXT: LowerHLFIROrderedAssignments +// PASSES-NEXT: LowerWorkshare // PASSES-NEXT: LowerHLFIRIntrinsics // PASSES-NEXT: BufferizeHLFIR // PASSES-NEXT: ConvertHLFIRtoFIR -// 
PASSES-NEXT: LowerWorkshare // PASSES-NEXT: CSE // PASSES-NEXT: (S) 0 num-cse'd - Number of operations CSE'd // PASSES-NEXT: (S) 0 num-dce'd - Number of operations DCE'd diff --git a/flang/test/Integration/OpenMP/workshare02.f90 b/flang/test/Integration/OpenMP/workshare02.f90 new file mode 100644 index 0000000000000..68b810a32f247 --- /dev/null +++ b/flang/test/Integration/OpenMP/workshare02.f90 @@ -0,0 +1,36 @@ +!===----------------------------------------------------------------------===! +! This directory can be used to add Integration tests involving multiple +! stages of the compiler (for eg. from Fortran to LLVM IR). It should not +! contain executable tests. We should only add tests here sparingly and only +! if there is no other way to test. Repeat this message in each test that is +! added to this directory and sub-directories. +!===----------------------------------------------------------------------===! + +!RUN: %flang_fc1 -emit-mlir -fopenmp -O3 %s -o - | FileCheck %s --check-prefix MLIR + +program test_ws_01 + implicit none + real(8) :: arr_01(10), x + arr_01 = [0.347,0.892,0.573,0.126,0.788,0.412,0.964,0.205,0.631,0.746] + + !$omp parallel workshare + x = sum(arr_01) + !$omp end parallel workshare +end program test_ws_01 + +! MLIR: func.func @_QQmain +! MLIR: omp.parallel { +! [...] +! MLIR: omp.wsloop { +! MLIR: omp.loop_nest {{.*}} +! [...] +! MLIR: %[[SUM:.*]] = arith.addf {{.*}} +! [...] +! MLIR: omp.yield +! MLIR: } +! MLIR: } +! MLIR: omp.barrier +! MLIR: omp.terminator +! MLIR: } +! MLIR: return +! MLIR: }
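The SUM lowering added in this series only recognizes the simplest form of the intrinsic (a rank-1 array argument with no DIM, assigned to a scalar); anything else trips the "Only 1D array scalar assignment for sum ... supported in workshare construct" error emitted by the pass. A small Fortran illustration of that restriction, based only on the check and diagnostic in the patch above:

    subroutine sum_example(a, b, x)
      real(8) :: a(100), b(10, 10), x
      !$omp parallel workshare
      x = sum(a)      ! handled: rank-1 argument, no DIM, scalar assignment
      ! x = sum(b)    ! would be rejected: only the 1-D, no-DIM form is lowered
      !$omp end parallel workshare
    end subroutine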