Skip to content
This repository has been archived by the owner on Jan 20, 2024. It is now read-only.

Commit

Permalink
[mlir][llvm] Drop unreachable basic block during import (#78467)
Browse files Browse the repository at this point in the history
This revision updates the LLVM IR import to support unreachable basic
blocks. An unreachable block may dominate itself and a value defined
inside the block may thus be used before its definition. The import does
not support such dependencies. We thus delete the unreachable basic
blocks before the import. This is safe because MLIR has no basic block
labels that an indirect branch could target, so unreachable blocks can
be deleted without changing program semantics.

Additionally, add a small poison constant import test.
  • Loading branch information
gysit committed Jan 19, 2024
1 parent 3d90e1f commit 9dd0eb9
Show file tree
Hide file tree
Showing 4 changed files with 97 additions and 35 deletions.
46 changes: 31 additions & 15 deletions mlir/lib/Target/LLVMIR/ModuleImport.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
#include "mlir/Interfaces/DataLayoutInterfaces.h"
#include "mlir/Tools/mlir-translate/Translation.h"

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/StringSet.h"
Expand Down Expand Up @@ -132,18 +133,17 @@ static LogicalResult convertInstructionImpl(OpBuilder &odsBuilder,
return failure();
}

/// Get a topologically sorted list of blocks for the given basic blocks.
///
/// Performs a reverse post-order traversal starting from each block that has
/// not yet been visited, so that a block always appears after the blocks that
/// dominate it. This ordering lets the import translate operands defined in a
/// dominating block before they are used.
///
/// \param basicBlocks the blocks to sort; every block reachable from an entry
///        in this list is expected to be contained in the list as well.
/// \return the blocks in topological (reverse post-order) order.
static SetVector<llvm::BasicBlock *>
getTopologicallySortedBlocks(ArrayRef<llvm::BasicBlock *> basicBlocks) {
  SetVector<llvm::BasicBlock *> blocks;
  for (llvm::BasicBlock *basicBlock : basicBlocks) {
    // Skip blocks already inserted by a previous traversal.
    if (!blocks.contains(basicBlock)) {
      llvm::ReversePostOrderTraversal<llvm::BasicBlock *> traversal(basicBlock);
      blocks.insert(traversal.begin(), traversal.end());
    }
  }
  // All input blocks must have been reached by some traversal; otherwise the
  // returned ordering would silently drop blocks.
  assert(blocks.size() == basicBlocks.size() && "some blocks are not sorted");
  return blocks;
}

Expand Down Expand Up @@ -1859,11 +1859,26 @@ LogicalResult ModuleImport::processFunction(llvm::Function *func) {
if (func->isDeclaration())
return success();

// Eagerly create all blocks.
for (llvm::BasicBlock &bb : *func) {
Block *block =
builder.createBlock(&funcOp.getBody(), funcOp.getBody().end());
mapBlock(&bb, block);
// Collect the set of basic blocks reachable from the function's entry block.
// This step is crucial as LLVM IR can contain unreachable blocks that
// self-dominate. As a result, an operation might utilize a variable it
// defines, which the import does not support. Given that MLIR lacks block
// label support, we can safely remove unreachable blocks, as there are no
// indirect branch instructions that could potentially target these blocks.
llvm::df_iterator_default_set<llvm::BasicBlock *> reachable;
for (llvm::BasicBlock *basicBlock : llvm::depth_first_ext(func, reachable))
(void)basicBlock;

// Eagerly create all reachable blocks.
SmallVector<llvm::BasicBlock *> reachableBasicBlocks;
for (llvm::BasicBlock &basicBlock : *func) {
// Skip unreachable blocks.
if (!reachable.contains(&basicBlock))
continue;
Region &body = funcOp.getBody();
Block *block = builder.createBlock(&body, body.end());
mapBlock(&basicBlock, block);
reachableBasicBlocks.push_back(&basicBlock);
}

// Add function arguments to the entry block.
Expand All @@ -1876,10 +1891,11 @@ LogicalResult ModuleImport::processFunction(llvm::Function *func) {
// Process the blocks in topological order. The ordered traversal ensures
// operands defined in a dominating block have a valid mapping to an MLIR
// value once a block is translated.
SetVector<llvm::BasicBlock *> blocks = getTopologicallySortedBlocks(func);
SetVector<llvm::BasicBlock *> blocks =
getTopologicallySortedBlocks(reachableBasicBlocks);
setConstantInsertionPointToStart(lookupBlock(blocks.front()));
for (llvm::BasicBlock *bb : blocks)
if (failed(processBasicBlock(bb, lookupBlock(bb))))
for (llvm::BasicBlock *basicBlock : blocks)
if (failed(processBasicBlock(basicBlock, lookupBlock(basicBlock))))
return failure();

// Process the debug intrinsics that require a delayed conversion after
Expand Down
10 changes: 10 additions & 0 deletions mlir/test/Target/LLVMIR/Import/constant.ll
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,16 @@ define void @undef_constant(i32 %arg0) {

; // -----

; CHECK-LABEL: @poison_constant
define void @poison_constant(double %arg0) {
; CHECK: %[[POISON:.+]] = llvm.mlir.poison : f64
; CHECK: llvm.fadd %[[POISON]], %{{.*}} : f64
%1 = fadd double poison, %arg0
ret void
}

; // -----

; CHECK-LABEL: @null_constant
define ptr @null_constant() {
; CHECK: %[[NULL:[0-9]+]] = llvm.mlir.zero : !llvm.ptr
Expand Down
41 changes: 21 additions & 20 deletions mlir/test/Target/LLVMIR/Import/exception.ll
Original file line number Diff line number Diff line change
Expand Up @@ -12,34 +12,35 @@ define i32 @invokeLandingpad() personality ptr @__gxx_personality_v0 {
; CHECK: %[[a1:[0-9]+]] = llvm.mlir.addressof @_ZTIii : !llvm.ptr
; CHECK: %[[a3:[0-9]+]] = llvm.alloca %{{[0-9]+}} x i8 {alignment = 1 : i64} : (i32) -> !llvm.ptr
%1 = alloca i8
; CHECK: llvm.invoke @foo(%[[a3]]) to ^bb2 unwind ^bb1 : (!llvm.ptr) -> ()
invoke void @foo(ptr %1) to label %4 unwind label %2
; CHECK: llvm.invoke @foo(%[[a3]]) to ^[[bb1:.*]] unwind ^[[bb4:.*]] : (!llvm.ptr) -> ()
invoke void @foo(ptr %1) to label %bb1 unwind label %bb4

; CHECK: ^bb1:
; CHECK: ^[[bb1]]:
bb1:
; CHECK: %{{[0-9]+}} = llvm.invoke @bar(%[[a3]]) to ^[[bb2:.*]] unwind ^[[bb4]] : (!llvm.ptr) -> !llvm.ptr
%2 = invoke ptr @bar(ptr %1) to label %bb2 unwind label %bb4

; CHECK: ^[[bb2]]:
bb2:
; CHECK: llvm.invoke @vararg_foo(%[[a3]], %{{.*}}) to ^[[bb3:.*]] unwind ^[[bb4]] vararg(!llvm.func<void (ptr, ...)>) : (!llvm.ptr, i32) -> ()
invoke void (ptr, ...) @vararg_foo(ptr %1, i32 0) to label %bb3 unwind label %bb4

; CHECK: ^[[bb3]]:
bb3:
; CHECK: llvm.invoke %{{.*}}(%[[a3]], %{{.*}}) to ^[[bb5:.*]] unwind ^[[bb4]] vararg(!llvm.func<void (ptr, ...)>) : !llvm.ptr, (!llvm.ptr, i32) -> ()
invoke void (ptr, ...) undef(ptr %1, i32 0) to label %bb5 unwind label %bb4

; CHECK: ^[[bb4]]:
bb4:
; CHECK: %{{[0-9]+}} = llvm.landingpad (catch %{{[0-9]+}} : !llvm.ptr) (catch %[[a1]] : !llvm.ptr) (filter %{{[0-9]+}} : !llvm.array<1 x i1>) : !llvm.struct<(ptr, i32)>
%3 = landingpad { ptr, i32 } catch ptr @_ZTIi catch ptr @_ZTIii
filter [1 x i1] [i1 1]
resume { ptr, i32 } %3

; CHECK: ^bb2:
; CHECK: ^[[bb5]]:
bb5:
; CHECK: llvm.return %{{[0-9]+}} : i32
ret i32 1

; CHECK: ^bb3:
; CHECK: %{{[0-9]+}} = llvm.invoke @bar(%[[a3]]) to ^bb2 unwind ^bb1 : (!llvm.ptr) -> !llvm.ptr
%6 = invoke ptr @bar(ptr %1) to label %4 unwind label %2

; CHECK: ^bb4:
; CHECK: llvm.invoke @vararg_foo(%[[a3]], %{{.*}}) to ^bb2 unwind ^bb1 vararg(!llvm.func<void (ptr, ...)>) : (!llvm.ptr, i32) -> ()
invoke void (ptr, ...) @vararg_foo(ptr %1, i32 0) to label %4 unwind label %2

; CHECK: ^bb5:
; CHECK: llvm.invoke %{{.*}}(%[[a3]], %{{.*}}) to ^bb2 unwind ^bb1 vararg(!llvm.func<void (ptr, ...)>) : !llvm.ptr, (!llvm.ptr, i32) -> ()
invoke void (ptr, ...) undef(ptr %1, i32 0) to label %4 unwind label %2

; CHECK: ^bb6:
; CHECK: llvm.return %{{[0-9]+}} : i32
ret i32 0
}

declare i32 @foo2()
Expand Down
35 changes: 35 additions & 0 deletions mlir/test/Target/LLVMIR/Import/unreachable-blocks.ll
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
; RUN: mlir-translate -import-llvm %s | FileCheck %s

; Test unreachable blocks are dropped.

; CHECK-LABEL: llvm.func @unreachable_block
define void @unreachable_block(float %0) {
.entry:
; CHECK: llvm.return
ret void

; This block is unreachable and therefore dominates itself: the fadd below
; uses its own result. The importer must drop the block rather than try to
; translate the self-referential value.
unreachable:
; CHECK-NOT: llvm.fadd
%1 = fadd float %0, %1
br label %unreachable
}

; Test unreachable blocks with back edges are supported.

; CHECK-LABEL: llvm.func @back_edge
define i32 @back_edge(i32 %0) {
.entry:
; CHECK: llvm.br ^[[RET:.*]](%{{.*}})
br label %ret
; The phi references %2 from the unreachable block; once that block is
; dropped, the corresponding phi edge must be dropped as well.
ret:
; CHECK: ^[[RET]](%{{.*}}: i32)
%1 = phi i32 [ %0, %.entry ], [ %2, %unreachable ]
; CHECK: llvm.return %{{.*}} : i32
ret i32 %1

; Unreachable block with a back edge to a reachable block (%ret) and a
; self-referential add (%2 uses %2); both must vanish from the output.
unreachable:
; CHECK-NOT: add
%2 = add i32 %0, %2
%3 = icmp eq i32 %2, 42
br i1 %3, label %ret, label %unreachable
}

0 comments on commit 9dd0eb9

Please sign in to comment.