[mlir] Shape.AssumingOp implements RegionBranchOpInterface.
This adds support for the interface and provides unambiguous information
on the control flow, as it does not depend on any runtime values.
The code is tested by confirming that buffer-placement behaves as
expected.
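
For background, a sketch of what the interface guarantees here (hypothetical
query code, not part of the patch; `op` is a shape::AssumingOp and the APIs
are MLIR's ControlFlowInterfaces as of this commit):

    SmallVector<RegionSuccessor, 1> successors;
    // Entering from the parent op: the only successor is the attached region.
    op.getSuccessorRegions(/*index=*/llvm::None, /*operands=*/{}, successors);
    // Leaving region 0: the only successor is the parent op itself,
    // represented as a RegionSuccessor wrapping the op's results.
    successors.clear();
    op.getSuccessorRegions(/*index=*/0u, /*operands=*/{}, successors);

Both queries return a single successor regardless of operand values, which is
what makes the control flow unambiguous.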

Differential Revision: https://reviews.llvm.org/D87894
tpopp committed Sep 21, 2020
1 parent 4a5cc38 commit ffdd4a4
Showing 3 changed files with 55 additions and 0 deletions.
1 change: 1 addition & 0 deletions mlir/include/mlir/Dialect/Shape/IR/ShapeOps.td
@@ -619,6 +619,7 @@ def Shape_AssumingAllOp : Shape_Op<"assuming_all", [Commutative, NoSideEffect]>

def Shape_AssumingOp : Shape_Op<"assuming",
    [SingleBlockImplicitTerminator<"AssumingYieldOp">,
     DeclareOpInterfaceMethods<RegionBranchOpInterface>,
     RecursiveSideEffects]> {
  let summary = "Execute the region";
  let description = [{
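For context, DeclareOpInterfaceMethods<RegionBranchOpInterface> asks ODS to
declare the interface method on the generated AssumingOp class, roughly of
this shape (a sketch, not the exact generated code):

    void getSuccessorRegions(Optional<unsigned> index,
                             ArrayRef<Attribute> operands,
                             SmallVectorImpl<RegionSuccessor> &regions);

The body is then supplied by hand in Shape.cpp below.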
15 changes: 15 additions & 0 deletions mlir/lib/Dialect/Shape/IR/Shape.cpp
@@ -233,6 +233,21 @@ void AssumingOp::getCanonicalizationPatterns(OwningRewritePatternList &patterns,
  patterns.insert<AssumingWithTrue>(context);
}

// See RegionBranchOpInterface in Interfaces/ControlFlowInterfaces.td
void AssumingOp::getSuccessorRegions(
    Optional<unsigned> index, ArrayRef<Attribute> operands,
    SmallVectorImpl<RegionSuccessor> &regions) {
  // AssumingOp has unconditional control flow into the region and back to the
  // parent, so return the correct RegionSuccessor purely based on the index
  // being None or 0.
  if (index.hasValue()) {
    regions.push_back(RegionSuccessor(getResults()));
    return;
  }

  regions.push_back(RegionSuccessor(&doRegion()));
}
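
As a usage sketch (hypothetical code, not part of the patch), passes can now
discover this control flow generically through the interface instead of
hard-coding shape.assuming; presumably this is how buffer placement picks it
up. Here `someOp` is an assumed `Operation *`:

    if (auto branchOp = dyn_cast<RegionBranchOpInterface>(someOp)) {
      SmallVector<RegionSuccessor, 2> entrySuccessors;
      branchOp.getSuccessorRegions(/*index=*/llvm::None, /*operands=*/{},
                                   entrySuccessors);
      // For shape.assuming this returns exactly one successor: the region.
    }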

void AssumingOp::inlineRegionIntoParent(AssumingOp &op,
                                        PatternRewriter &rewriter) {
  auto *blockBeforeAssuming = rewriter.getInsertionBlock();
39 changes: 39 additions & 0 deletions mlir/test/Transforms/buffer-placement.mlir
@@ -1417,3 +1417,42 @@ func @do_loop_alloc(
}

// expected-error@+1 {{Structured control-flow loops are supported only}}

// -----

func @assumingOp(%arg0: !shape.witness, %arg2: memref<2xf32>, %arg3: memref<2xf32>) {
  // Confirm the alloc will be dealloc'ed in the block.
  %1 = shape.assuming %arg0 -> memref<2xf32> {
    %0 = alloc() : memref<2xf32>
    shape.assuming_yield %arg2 : memref<2xf32>
  }
  // Confirm the alloc will be returned and dealloc'ed after its use.
  %3 = shape.assuming %arg0 -> memref<2xf32> {
    %2 = alloc() : memref<2xf32>
    shape.assuming_yield %2 : memref<2xf32>
  }
  "linalg.copy"(%3, %arg3) : (memref<2xf32>, memref<2xf32>) -> ()
  return
}

// CHECK-LABEL: func @assumingOp(
// CHECK-SAME: %[[ARG0:.*]]: !shape.witness,
// CHECK-SAME: %[[ARG1:.*]]: memref<2xf32>,
// CHECK-SAME: %[[ARG2:.*]]: memref<2xf32>) {
// CHECK: %[[UNUSED_RESULT:.*]] = shape.assuming %[[ARG0]] -> (memref<2xf32>) {
// CHECK: %[[ALLOC0:.*]] = alloc() : memref<2xf32>
// CHECK: dealloc %[[ALLOC0]] : memref<2xf32>
// CHECK: shape.assuming_yield %[[ARG1]] : memref<2xf32>
// CHECK: }
// CHECK: %[[ASSUMING_RESULT:.*]] = shape.assuming %[[ARG0]] -> (memref<2xf32>) {
// CHECK: %[[TMP_ALLOC:.*]] = alloc() : memref<2xf32>
// CHECK: %[[RETURNING_ALLOC:.*]] = alloc() : memref<2xf32>
// CHECK: linalg.copy(%[[TMP_ALLOC]], %[[RETURNING_ALLOC]]) : memref<2xf32>, memref<2xf32>
// CHECK: dealloc %[[TMP_ALLOC]] : memref<2xf32>
// CHECK: shape.assuming_yield %[[RETURNING_ALLOC]] : memref<2xf32>
// CHECK: }
// CHECK: linalg.copy(%[[ASSUMING_RESULT]], %[[ARG2]]) : memref<2xf32>, memref<2xf32>
// CHECK: dealloc %[[ASSUMING_RESULT]] : memref<2xf32>
// CHECK: return
// CHECK: }
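
For reference, tests in this file are driven by a RUN line at the top of
buffer-placement.mlir, outside this hunk; at the time it was along these
lines (an assumption, since the line is not shown in the diff):

    // RUN: mlir-opt -buffer-placement -split-input-file %s | FileCheck %s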
