[WIP] High-Level (coarse-grain) parallelization with ONNXParallelOp and ONNXForkOp based on OpenMP #2756

Draft: imaihal wants to merge 37 commits into main from parallelop_forkop_with_omp
Commits:
c9f68fd
Added ONNXParallelOp and ONNXForkOp
imaihal Mar 15, 2024
8f87a9c
Add lowering pass for ONNXParallelOp and ONNXForkOp
imaihal Mar 15, 2024
9234f25
Remove SCFToCF Pass
imaihal Mar 15, 2024
98aef8a
New files for ONNXParallelOp and ONNXForkOp
imaihal Mar 15, 2024
796aa80
Revert: enable deallocation op as ilegal op
imaihal Mar 18, 2024
84f9f0d
Clean up by creating a function for moving alloc op
imaihal Mar 21, 2024
f24dac6
Merge branch 'main' into parallelop_forkop_with_omp
imaihal Mar 21, 2024
e6591f0
Update KrnlToAffine pass to support scf::ifOp in body of KrnlIterateOp.
imaihal Mar 26, 2024
0db91dc
Update comments.
imaihal Mar 26, 2024
22cbc08
Merge branch 'main' into parallelop_forkop_with_omp
imaihal Mar 26, 2024
f24c9a6
Merge branch 'main' into parallelop_forkop_with_omp
imaihal Mar 27, 2024
c4f0a3b
Dynamic dim support v1.
imaihal Mar 29, 2024
f7fc4ca
Fix static dim case.
imaihal Apr 1, 2024
38161c1
Merge branch 'main' into parallelop_forkop_with_omp
imaihal Apr 1, 2024
c8f8075
Add KrnlRegionOp in region of scf::ifOp.
imaihal Apr 2, 2024
0fbfa9d
Merge branch 'main' into parallelop_forkop_with_omp
imaihal Apr 2, 2024
8d10b0b
Update dynamic dim support.
imaihal Apr 3, 2024
ed5fec1
Merge branch 'main' into parallelop_forkop_with_omp
imaihal Apr 3, 2024
9077e7d
Update condition to apply unstickStickRemoval optimization
imaihal Apr 4, 2024
91b6f46
Update comments
imaihal Apr 4, 2024
0e06aec
Merge branch 'main' into parallelop_forkop_with_omp
imaihal Apr 4, 2024
c73d8ad
Merge branch 'main' into parallelop_forkop_with_omp
imaihal Apr 5, 2024
d934273
Merge branch 'main' into parallelop_forkop_with_omp
imaihal Apr 12, 2024
9a45f0f
Added lit test for shape inference.
imaihal Apr 12, 2024
dad1f99
Fix lit test.
imaihal Apr 13, 2024
6d111a7
Update dynamic dim support
imaihal Apr 17, 2024
44396c4
Merge branch 'main' into parallelop_forkop_with_omp
imaihal Apr 17, 2024
37e12b2
Fix insertion of KrnlRegionOp.
imaihal Apr 22, 2024
c5e7663
Add shapeHelpr for ONNXParallelOp and ONNXForkOp.
imaihal Apr 25, 2024
2f31a16
Add dimAnalysis for ONNXParallelOp and ONNXForkOp.
imaihal Apr 25, 2024
b70fbcc
Merge branch 'main' into parallelop_forkop_with_omp
imaihal Apr 25, 2024
b6ee509
Merge branch 'main' into parallelop_forkop_with_omp
imaihal May 1, 2024
06cc16c
Merge branch 'main' into parallelop_forkop_with_omp
imaihal May 27, 2024
2bcabe8
Fix inserting regionOp in iterateOp.
imaihal May 29, 2024
c837809
Add scripts for rewriting a model for operator-level parallelization.
imaihal Jun 6, 2024
53805d9
Merge branch 'main' into parallelop_forkop_with_omp
imaihal Jun 6, 2024
17ac56d
black format
imaihal Jun 6, 2024
16 changes: 9 additions & 7 deletions src/Compiler/CompilerPasses.cpp
@@ -186,13 +186,15 @@ void addONNXToKrnlPasses(mlir::PassManager &pm, int optLevel, bool enableCSE,
   if (enableInstrumentONNXSignature)
     pm.addNestedPass<func::FuncOp>(
         onnx_mlir::createInstrumentONNXSignaturePass());
-  pm.addPass(onnx_mlir::createLowerToKrnlPass(/*enableTiling*/ optLevel >= 3,
-      /*enableSIMD*/ optLevel >= 3 && !disableSimdOption, enableParallel,
-      /*opsToCall*/ opsForCall));
-  // An additional pass of canonicalization is helpful because lowering
-  // from ONNX dialect to Standard dialect exposes additional canonicalization
-  // opportunities.
-  pm.addPass(mlir::createCanonicalizerPass());
+  for (unsigned i = 0; i < 2; i++) {
+    pm.addPass(onnx_mlir::createLowerToKrnlPass(/*enableTiling*/ optLevel >= 3,
+        /*enableSIMD*/ optLevel >= 3 && !disableSimdOption, enableParallel,
+        /*opsToCall*/ opsForCall));
+    // An additional pass of canonicalization is helpful because lowering
+    // from ONNX dialect to Standard dialect exposes additional canonicalization
+    // opportunities.
+    pm.addPass(mlir::createCanonicalizerPass());
+  }
 }

 void addKrnlToAffinePasses(mlir::PassManager &pm) {
1 change: 1 addition & 0 deletions src/Conversion/KrnlToAffine/ConvertKrnlToAffine.cpp
@@ -17,6 +17,7 @@
 #include "mlir/Dialect/Affine/IR/AffineOps.h"
 #include "mlir/Dialect/Affine/LoopUtils.h"
 #include "mlir/Dialect/Func/IR/FuncOps.h"
+#include "mlir/Dialect/SCF/IR/SCF.h"
 #include "mlir/Dialect/Vector/IR/VectorOps.h"
 #include "mlir/IR/BuiltinTypes.h"
 #include "mlir/IR/Types.h"
292 changes: 292 additions & 0 deletions src/Conversion/ONNXToKrnl/Additional/Parallel.cpp
@@ -0,0 +1,292 @@
/*
* SPDX-License-Identifier: Apache-2.0
*/

//===---------- Parallel.cpp - Lowering Parallel Op and Fork Op ----------===//
//
// Copyright 2019-2023 The IBM Research Authors.
//
// =============================================================================
//
// This file lowers the ONNX Parallel and Fork Operators to Krnl dialect.
//
//===----------------------------------------------------------------------===//

#include "src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp"
#include "src/Dialect/Krnl/DialectBuilder.hpp"

#include <llvm/Support/Debug.h>

#define DEBUG_TYPE "lowering-parallelop-to-krnl"

using namespace mlir;

namespace onnx_mlir {

//===----------------------------------------------------------------------===//
// Helper function
// Return true if `a` happens before `b`, i.e., `a` or one of its ancestors
// properly dominates `b` and `b` is not inside `a`.
// Reference: llvm-project/mlir/lib/Dialect/Transform/IR/TransformInterfaces.cpp
//===----------------------------------------------------------------------===//
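// Example (illustrative names, not from the sources below): given
//   %buf = memref.alloc()               // opA
//   krnl.iterate ... { ... opB ... }    // a loop containing opB
// happensBefore(opA, opB) is true because opA precedes the loop that holds
// opB, while happensBefore(loop, opB) is false because the loop is an
// ancestor of opB.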

static bool happensBefore(Operation *a, Operation *b) {
do {
if (a->isProperAncestor(b))
return false;
if (Operation *bAncestor = a->getBlock()->findAncestorOpInBlock(*b)) {
return a->isBeforeInBlock(bAncestor);
}
} while ((a = a->getParentOp()));
return false;
}

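// Recursively collect into `globalOpsToMove` the operations that must be
// hoisted out of the region: the ops in `opsToMove`, the defining ops of
// their operands, and, for alloc ops, the KrnlStoreOps writing into the
// allocated buffer, whenever those ops live in `parentOp`'s block.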
void moveAllocOpOperands(SmallVector<Operation *, 4> &opsToMove,
SmallVector<Operation *, 4> &globalOpsToMove, Operation *returnValOp,
Operation *parentOp) {
if (opsToMove.size() == 0)
return;

SmallVector<Operation *, 4> nextOpsToMove;
for (Operation *op : opsToMove) {
LLVM_DEBUG(llvm::dbgs() << "@@START opsToMove op: = " << *op << "\n");
// Add the op to the list of ops to move if it is not already there.
if (llvm::find(globalOpsToMove, op) == globalOpsToMove.end()) {
globalOpsToMove.push_back(op);
LLVM_DEBUG(llvm::dbgs() << "Added in opsToMove : " << *op << "\n");
}

Region &parentOpRegion = parentOp->getRegions().front();
Block &parentOpBlock = parentOpRegion.getBlocks().front();

// AllocOp: If the allocated value is used by a KrnlStoreOp, the KrnlStoreOp
// needs to be moved as well.
if (op != returnValOp) {
if (auto allocOp = dyn_cast<memref::AllocOp>(op)) {
for (Operation *user : allocOp.getResult().getUsers()) {
if (auto krnlStoreOp = dyn_cast<KrnlStoreOp>(user)) {
if (user->getBlock() == &parentOpBlock) {
if ((llvm::find(nextOpsToMove, user) == nextOpsToMove.end()) and
(llvm::find(globalOpsToMove, user) ==
globalOpsToMove.end())) {
LLVM_DEBUG(llvm::dbgs()
<< "Added in nextOpsTomove (Single KrnlStore) = "
<< *user << "\n");
nextOpsToMove.push_back(user);
}
} else {
if ((llvm::find(nextOpsToMove, user->getParentOp()) ==
nextOpsToMove.end()) and
(llvm::find(globalOpsToMove, user->getParentOp()) ==
globalOpsToMove.end())) {
LLVM_DEBUG(llvm::dbgs()
<< "Added in nextOpsTomove (KrnlIterateOp): "
<< *(user->getParentOp()) << "\n");
nextOpsToMove.push_back(user->getParentOp());
}
}
}
}
}
}
// KrnlIterateOp: Operations in the region of a KrnlIterateOp are already
// in opsToMove, so also add the defining ops of the operands used by the
// operations in its region.
if (auto iterateOp = dyn_cast<KrnlIterateOp>(op)) {
Block &iterationBlock = iterateOp.getBodyRegion().front();
for (Operation &iop : iterationBlock.getOperations()) {
LLVM_DEBUG(llvm::dbgs() << "Ops in krnlIterateOp: " << *(&iop) << "\n");
for (unsigned i = 0; i < iop.getNumOperands(); ++i) {
Value oprd = iop.getOperand(i);
if (isa<BlockArgument>(oprd))
continue;
Operation *oprdDefOp = oprd.getDefiningOp();
if (oprdDefOp->getBlock() != &iterationBlock and
oprdDefOp->getBlock() == &parentOpBlock) {
if ((llvm::find(nextOpsToMove, oprdDefOp) ==
nextOpsToMove.end()) and
(llvm::find(globalOpsToMove, oprdDefOp) ==
globalOpsToMove.end())) {

LLVM_DEBUG(llvm::dbgs()
<< "Added in nextOpsTomove operand in KrnlIterateOp: "
<< *oprdDefOp << "\n");
nextOpsToMove.push_back(oprdDefOp);
}
}
}
}
}

// Check whether the operands need to be moved. An operand must be moved if
// its defining op is in the parent op's block.
for (unsigned i = 0; i < op->getNumOperands(); ++i) {
Value oprd = op->getOperand(i);
if (isa<BlockArgument>(oprd))
continue;
Operation *oprdDefOp = oprd.getDefiningOp();
if (oprdDefOp->getBlock() == &parentOpBlock) {
if ((llvm::find(nextOpsToMove, oprdDefOp) == nextOpsToMove.end()) and
(llvm::find(globalOpsToMove, oprdDefOp) == globalOpsToMove.end())) {
LLVM_DEBUG(llvm::dbgs() << "Added in nextOpsTomove operand for op: "
<< i << " = " << *oprdDefOp << "\n");
nextOpsToMove.push_back(oprdDefOp);
}
}
}
LLVM_DEBUG(llvm::dbgs() << "@@END\n");
}
// Recursively check whether the operands of the collected ops also need to be moved.
moveAllocOpOperands(nextOpsToMove, globalOpsToMove, returnValOp, parentOp);
}

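// Hoist the alloc ops that produce `yieldOp`'s operands, together with all
// operations they depend on (collected by moveAllocOpOperands), to just
// before `op`, and redirect all uses of `op`'s results to the hoisted
// allocations. Fails if the block has not yet been lowered to memref level.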
LogicalResult moveAllocOpBeforeAndReplaceAllUses(
ConversionPatternRewriter &rewriter, Operation *op, Operation *yieldOp) {
SmallVector<Operation *, 4> globalOpsToMove;
for (unsigned ii = 0; ii < yieldOp->getNumOperands(); ++ii) {
// Check the block's return value to determine whether the operations in
// the block have already been lowered to memref-level IR such as Krnl.
// Assume the block is not yet lowered if the return value still has a
// tensor type. The actual return value of the ONNXYieldOp is converted
// into a tensor by unrealized_conversion_cast, so look through that cast
// and check the operand of the preceding operation.
Value returnVal = yieldOp->getOperands()[ii];
if (isa<UnrealizedConversionCastOp>(returnVal.getDefiningOp()))
returnVal = returnVal.getDefiningOp()->getOperands()[0];
if (isa<TensorType>(returnVal.getType()))
return failure();

// Move allocOps for results before op
Operation *allocOpForReturnVal = returnVal.getDefiningOp();
SmallVector<Operation *, 4> opsToMove;
opsToMove.push_back(allocOpForReturnVal);
moveAllocOpOperands(opsToMove, globalOpsToMove, allocOpForReturnVal, op);
rewriter.replaceAllUsesWith(
op->getResults()[ii], allocOpForReturnVal->getResult(0));
}

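// Sort the collected ops so that the op that originally came last is
// first; moving each op directly before the previously moved one then
// reproduces the original relative order in front of `op`.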
llvm::sort(globalOpsToMove,
[](Operation *a, Operation *b) { return !happensBefore(a, b); });
Operation *justMovedOp = op;
for (Operation *gop : globalOpsToMove) {
gop->moveBefore(justMovedOp);
justMovedOp = gop;
}
return success();
}

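// Wrap the body of each KrnlIterateOp in `block` (recursively) in a
// KrnlRegionOp so that the ConvertKrnlToAffine pass accepts the loops that
// end up nested inside the scf.if ops created by the ParallelOp lowering.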
void insertRegionInIterateOp(
ConversionPatternRewriter &rewriter, Location &loc, Block &block) {
for (auto iterateOp : block.getOps<KrnlIterateOp>()) {
// Currently KrnlRegionOp does not support inputs or outputs for its region.
// So, if the KrnlIterateOp has outputs (via krnl.yield), KrnlRegionOp cannot be used.
if (iterateOp.getNumResults() == 0) {
KrnlRegionOp regionOp = rewriter.create<KrnlRegionOp>(loc);
Block &regionBlock = regionOp.getBodyRegion().front();
Block &iterateBlock = iterateOp.getRegion().back();
insertRegionInIterateOp(rewriter, loc, iterateBlock); // recursive call
rewriter.eraseOp(iterateBlock.getTerminator());
regionBlock.getOperations().splice(
regionBlock.end(), iterateBlock.getOperations());
rewriter.setInsertionPointToStart(&iterateBlock);
KrnlYieldOp krnlYieldOp = rewriter.create<KrnlYieldOp>(loc);
rewriter.moveOpBefore(regionOp, krnlYieldOp);
}
}
}

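// Lower an onnx.Parallel op whose body contains N onnx.Fork ops into a
// parallel krnl.iterate over the fork ids 0..N-1. Each fork body is placed
// under an scf.if (induction variable == fork id), so each iteration, and
// hence each thread once the loop is mapped to OpenMP, executes exactly
// one fork region.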
struct ONNXParallelOpLowering : public OpConversionPattern<ONNXParallelOp> {
explicit ONNXParallelOpLowering(
TypeConverter &typeConverter, MLIRContext *ctx)
: OpConversionPattern(typeConverter, ctx) {}

LogicalResult matchAndRewrite(ONNXParallelOp parallelOp,
ONNXParallelOpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const final {
Operation *op = parallelOp.getOperation();
Location loc = ONNXLoc<ONNXParallelOp>(op);
IndexExprScope ieScope(&rewriter, loc);
MultiDialectBuilder<KrnlBuilder, IndexExprBuilderForKrnl, MemRefBuilder,
MathBuilder>
create(rewriter, loc);

// Get the parallel region.
Region &parallelBody = parallelOp.getBody();
// Make sure the region has only one block; otherwise this pattern does not apply.
if (!parallelBody.hasOneBlock())
return failure();
// Get YieldOp of the body block.
Block &bodyBlock = parallelBody.front();
Operation *yieldOp = bodyBlock.getTerminator();
if (!isa<ONNXYieldOp>(yieldOp))
return failure();

// Move alloc ops included in ForkOps
SmallVector<ONNXForkOp, 4> forkOps;
for (Operation &bOp : bodyBlock.getOperations()) {
if (auto forkOp = dyn_cast<ONNXForkOp>(bOp)) {
forkOps.push_back(forkOp);
Operation *forkYieldOp = forkOp.getBody().front().getTerminator();
if (!isa<ONNXYieldOp>(forkYieldOp))
return failure();

if (failed(moveAllocOpBeforeAndReplaceAllUses(
rewriter, &bOp, forkYieldOp)))
return failure();
}
}

// Move allocOp included in ParallelOp
if (failed(moveAllocOpBeforeAndReplaceAllUses(rewriter, op, yieldOp)))
return failure();

// Create KrnlIterateOp and replace ParallelOp with it.
rewriter.setInsertionPoint(op);
std::vector<Value> loop;
defineLoops(rewriter, loc, loop, 1);
krnl::KrnlIterateOperandPack pack(rewriter, loop);
pack.pushConstantBound(0);
pack.pushConstantBound(forkOps.size());
KrnlBuilder createKrnl(rewriter, loc);
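// Mark the loop parallel: after lowering through affine/SCF to OpenMP,
// each iteration (and hence each fork body selected below) can run on its
// own thread.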
createKrnl.parallel(loop);
KrnlIterateOp iterateOp = createKrnl.iterate(pack);
Block &iterationBlock = iterateOp.getBodyRegion().back();
rewriter.setInsertionPointToStart(&iterationBlock);
ValueRange indices = createKrnl.getInductionVarValue({loop[0]});
rewriter.eraseOp(yieldOp);
rewriter.inlineBlockBefore(&bodyBlock, iterationBlock.getTerminator());

// Create SCFIfOp and replace ForkOp with it.
int64_t id = 0;
for (auto forkOp : forkOps) {
rewriter.setInsertionPoint(forkOp);
// Insert scf::IfOp
Value forkId = create.math.constantIndex(id);
Value eq = create.math.eq(forkId, indices[0]);
scf::IfOp ifOp = rewriter.create<scf::IfOp>(loc, eq, /*else=*/false);
Block &ifBlock = ifOp.getThenRegion().back();
rewriter.setInsertionPointToStart(&ifBlock);
// Insert a KrnlRegionOp into every KrnlIterateOp. This is needed to
// avoid errors in the ConvertKrnlToAffine pass.
Block &forkBlock = forkOp.getRegion().back();
insertRegionInIterateOp(rewriter, loc, forkBlock);
Operation *forkYieldOp = forkBlock.getTerminator();
rewriter.eraseOp(forkYieldOp);
rewriter.inlineBlockBefore(&forkBlock, ifBlock.getTerminator());
rewriter.eraseOp(forkOp);
id++;
}

rewriter.eraseOp(op);
return success();
}
};

void populateLoweringONNXParallelOpPattern(RewritePatternSet &patterns,
TypeConverter &typeConverter, MLIRContext *ctx) {
patterns.insert<ONNXParallelOpLowering>(typeConverter, ctx);
}

} // namespace onnx_mlir
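Schematically, this pattern turns a parallel region holding two fork regions into a two-iteration parallel loop. The sketch below is hand-written to illustrate the rewrite; the op syntax is abbreviated and is not actual compiler output:

    // Input (abbreviated):
    //   %r0, %r1 = onnx.Parallel {
    //     %f0 = onnx.Fork { ... branch 0 ...; onnx.Yield %t0 }
    //     %f1 = onnx.Fork { ... branch 1 ...; onnx.Yield %t1 }
    //     onnx.Yield %f0, %f1
    //   }
    //
    // Output (abbreviated):
    //   %buf0 = memref.alloc(...)        // hoisted result buffers
    //   %buf1 = memref.alloc(...)
    //   krnl.iterate(%i = 0 to 2) {      // marked parallel -> OpenMP
    //     scf.if (%i == 0) { ... branch 0, writing into %buf0 ... }
    //     scf.if (%i == 1) { ... branch 1, writing into %buf1 ... }
    //   }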
1 change: 1 addition & 0 deletions src/Conversion/ONNXToKrnl/CMakeLists.txt
@@ -8,6 +8,7 @@ add_onnx_mlir_library(OMONNXToKrnl
   Additional/Custom.cpp
   Additional/LayoutTransform.cpp
   Additional/ShapeTransform.cpp
+  Additional/Parallel.cpp
   ControlFlow/If.cpp
   ControlFlow/Loop.cpp
   ControlFlow/Scan.cpp
1 change: 1 addition & 0 deletions src/Conversion/ONNXToKrnl/ConvertONNXToKrnl.cpp
@@ -286,6 +286,7 @@ void populateONNXToKrnlConversionPattern(RewritePatternSet &patterns,
   populateLoweringONNXCustomOpPattern(patterns, typeConverter, ctx);
   populateLoweringONNXLayoutTransformOpPattern(patterns, typeConverter, ctx, enableParallel);
   populateLoweringONNXShapeTransformOpPattern(patterns, typeConverter, ctx);
+  populateLoweringONNXParallelOpPattern(patterns, typeConverter, ctx);
   // clang-format on
 }
4 changes: 4 additions & 0 deletions src/Conversion/ONNXToKrnl/ONNXToKrnlCommon.hpp
@@ -24,6 +24,7 @@
 #include "mlir/Dialect/Linalg/IR/Linalg.h"
 #include "mlir/Dialect/Math/IR/Math.h"
 #include "mlir/Dialect/MemRef/IR/MemRef.h"
+#include "mlir/Dialect/SCF/IR/SCF.h"
 #include "mlir/IR/PatternMatch.h"
 #include "mlir/Pass/Pass.h"
 #include "mlir/Transforms/DialectConversion.h"
@@ -470,6 +471,9 @@ void populateLoweringONNXShapeTransformOpPattern(
 void populateLoweringONNXCustomOpPattern(
     mlir::RewritePatternSet &, mlir::TypeConverter &, mlir::MLIRContext *);

+void populateLoweringONNXParallelOpPattern(
+    mlir::RewritePatternSet &, mlir::TypeConverter &, mlir::MLIRContext *);
+
 // Utilities for generating krnl.call for ONNX Ops

 // Create allocate based on COMPUTED shapeHelper.