PDL crashes on my simple file
╭─siddu_druid@siddharth-lean ~/phd/mlir-hoopl-rete/test ‹master●›
╰─$ mlir-opt pdl-simple.mlir -allow-unregistered-dialect -test-pdl-bytecode-pass
PLEASE submit a bug report to https://github.com/llvm/llvm-project/issues/ and include the crash backtrace.
Stack dump:
0.      Program arguments: mlir-opt pdl-simple.mlir -allow-unregistered-dialect -test-pdl-bytecode-pass
 #0 0x00000000008be623 llvm::sys::PrintStackTrace(llvm::raw_ostream&, int) (/home/siddu_druid/phd/mlir-hoopl-rete/llvm-project/build/bin/mlir-opt+0x8be623)
 #1 0x00000000008bc2de llvm::sys::RunSignalHandlers() (/home/siddu_druid/phd/mlir-hoopl-rete/llvm-project/build/bin/mlir-opt+0x8bc2de)
 #2 0x00000000008bec16 SignalHandler(int) (/home/siddu_druid/phd/mlir-hoopl-rete/llvm-project/build/bin/mlir-opt+0x8bec16)
 #3 0x00007fad2398d730 __restore_rt (/lib/x86_64-linux-gnu/libpthread.so.0+0x12730)
 #4 0x0000000000faa046 std::enable_if<!(std::is_convertible<mlir::ValueRange&, mlir::Operation*>::value), void>::type mlir::ResultRange::replaceAllUsesWith<mlir::ValueRange&>(mlir::ValueRange&) (/home/siddu_druid/phd/mlir-hoopl-rete/llvm-project/build/bin/mlir-opt+0xfaa046)
 #5 0x0000000001862fac mlir::RewriterBase::replaceOp(mlir::Operation*, mlir::ValueRange) (/home/siddu_druid/phd/mlir-hoopl-rete/llvm-project/build/bin/mlir-opt+0x1862fac)
 #6 0x00000000018d6ad5 (anonymous namespace)::ByteCodeExecutor::execute(mlir::PatternRewriter&, llvm::SmallVectorImpl<mlir::detail::PDLByteCode::MatchResult>*, llvm::Optional<mlir::Location>) (/home/siddu_druid/phd/mlir-hoopl-rete/llvm-project/build/bin/mlir-opt+0x18d6ad5)
 #7 0x00000000018d8ec1 mlir::detail::PDLByteCode::rewrite(mlir::PatternRewriter&, mlir::detail::PDLByteCode::MatchResult const&, mlir::detail::PDLByteCodeMutableState&) const (/home/siddu_druid/phd/mlir-hoopl-rete/llvm-project/build/bin/mlir-opt+0x18d8ec1)
 #8 0x00000000018f15b6 mlir::PatternApplicator::matchAndRewrite(mlir::Operation*, mlir::PatternRewriter&, llvm::function_ref<bool (mlir::Pattern const&)>, llvm::function_ref<void (mlir::Pattern const&)>, llvm::function_ref<mlir::LogicalResult (mlir::Pattern const&)>) (/home/siddu_druid/phd/mlir-hoopl-rete/llvm-project/build/bin/mlir-opt+0x18f15b6)
 #9 0x00000000017aab4c mlir::applyPatternsAndFoldGreedily(llvm::MutableArrayRef<mlir::Region>, mlir::FrozenRewritePatternSet const&, mlir::GreedyRewriteConfig) (/home/siddu_druid/phd/mlir-hoopl-rete/llvm-project/build/bin/mlir-opt+0x17aab4c)
signed int, mlir::PassInstrumentor*, mlir::PassInstrumentation::PipelineParentInfo const*) (/home/siddu_druid/phd/mlir-hoopl-rete/llvm-project/build/bin/mlir-opt+0x174f404)
+0x1725bf0)
try&, llvm::ThreadPool*) (/home/siddu_druid/phd/mlir-hoopl-rete/llvm-project/build/bin/mlir-opt+0x1723dfa)
ol, bool, bool) (/home/siddu_druid/phd/mlir-hoopl-rete/llvm-project/build/bin/mlir-opt+0x1723aaa)
[2]    27957 segmentation fault  mlir-opt pdl-simple.mlir -allow-unregistered-dialect -test-pdl-bytecode-pass
╭─siddu_druid@siddharth-lean ~/phd/mlir-hoopl-rete/test ‹master●›
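
The top of the backtrace narrows this down: the PDL bytecode executor's rewrite step calls mlir::RewriterBase::replaceOp (frame #5), which forwards to ResultRange::replaceAllUsesWith (frame #4) and segfaults there. replaceOp dereferences the replacement values it is handed, so a null mlir::Value reaching it, such as the one an out-of-range pdl.result index produces at rewrite time, crashes instead of merely failing the pattern. A minimal sketch of the kind of rewrite that would take this path, assuming the matched op has exactly one result (hypothetical names, not the committed pattern):

// Hypothetical sketch: %root is assumed to match an op with a single result,
// so index 1 is out of range and yields a null value when the bytecode runs.
pdl.rewrite %root {
  %bad = pdl.result 1 of %root               // only result 0 exists
  pdl.replace %other with (%bad : !pdl.value) // replaceOp receives a null Value
}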
bollu committed Feb 4, 2022
1 parent 4c69405 commit 532322d
115 changes: 18 additions & 97 deletions test/pdl-simple.mlir
@@ -3,115 +3,36 @@
 // -----
 
 //===----------------------------------------------------------------------===//
-// 1-layer perceptron with split fwd/bwd operations
+// Asm add rewrite to write add (int 0) a -> a
 //===----------------------------------------------------------------------===//
 
 module @patterns {
-  // fc_fwd
   pdl.pattern : benefit(1) {
-    %in_type = pdl.type
-    %out_type = pdl.type
-    %weight_type = pdl.type
-    %rxact = pdl.operand : %in_type
-    %weight = pdl.operand : %weight_type
 
-    %attr0 = pdl.attribute false
-    %op0 = pdl.operation "tf.MatMul" (%rxact, %weight : !pdl.value, !pdl.value) {"transpose_a" = %attr0, "transpose_b" = %attr0} -> (%out_type : !pdl.type)
 
-    pdl.rewrite %op0 {
-      %op1 = pdl.operation "kernel.FcFwd" (%rxact, %weight : !pdl.value, !pdl.value) -> (%out_type : !pdl.type)
-      %val1 = pdl.result 0 of %op1 // txact
-      pdl.replace %op0 with (%val1 : !pdl.value) // tf.MatMul
-    }
-  }
-
-  // fc_bwd
-  pdl.pattern : benefit(4) {
-    %in_type = pdl.type
-    %out_type = pdl.type
-    %weight_type = pdl.type
-    %const_type = pdl.type
-    %rxact = pdl.operand : %in_type
-    %rxdelta = pdl.operand : %out_type
-    %weight = pdl.operand : %weight_type
-
-    %attr0 = pdl.attribute true
-    %attr1 = pdl.attribute false
-    %op0 = pdl.operation "tf.MatMul" (%rxact, %rxdelta : !pdl.value, !pdl.value) {"transpose_a" = %attr0, "transpose_b" = %attr1} -> (%weight_type : !pdl.type)
+    %c0_type = pdl.type
+    %cr_type = pdl.type
+    %cr = pdl.operand : %cr_type
 
+    %c0_attr = pdl.attribute 0 : i32
+    // TODO: is pdl.operation allowed to have empty arg list?
+    // %op0 = pdl.operation "asm.int" () {"value" = %c0_attr} -> (%c0_type : !pdl.type)
+    %op0 = pdl.operation "asm.int" {"value" = %c0_attr} -> (%c0_type : !pdl.type)
+    %val0 = pdl.result 0 of %op0
-    %op1 = pdl.operation "tf.Const" -> (%const_type : !pdl.type)
-    %val1 = pdl.result 0 of %op1
-    %op2 = pdl.operation "tf.Mul" (%val0, %val1 : !pdl.value, !pdl.value) -> (%weight_type : !pdl.type)
-    %val2 = pdl.result 0 of %op2
-    %op3 = pdl.operation "tf.Sub" (%weight, %val2 : !pdl.value, !pdl.value) -> (%weight_type : !pdl.type)
-
-    pdl.rewrite %op3 {
-      %op4 = pdl.operation "kernel.FcBwd" (%rxact, %rxdelta, %weight : !pdl.value, !pdl.value, !pdl.value) -> (%weight_type : !pdl.type)
-      %val4 = pdl.result 0 of %op4 // weight_out
-      pdl.replace %op3 with (%val4 : !pdl.value) // tf.Sub
-      pdl.erase %op2 // tf.Mul
-      pdl.erase %op1 // tf.Const
-      pdl.erase %op0 // tf.MatMul
-    }
-  }
-
-  // softmax_cross_entropy
-  pdl.pattern : benefit(6) {
-    %in_type = pdl.type
-    %label_type = pdl.type
-    %loss_type = pdl.type
-    %mean_loss_type = pdl.type
-    %mean_const_type = pdl.type
-    %mul_const_type = pdl.type
-    %rxact = pdl.operand : %in_type
-    %rxlabel = pdl.operand : %label_type
-
-    %op0 = pdl.operation "tf.SparseSoftmaxCrossEntropyWithLogits" (%rxact, %rxlabel : !pdl.value, !pdl.value) -> (%loss_type, %in_type : !pdl.type, !pdl.type)
-    %val0_0 = pdl.result 0 of %op0 // loss
-    %val0_1 = pdl.result 1 of %op0 // gradient
-    %op1 = pdl.operation "tf.Const" -> (%mean_const_type : !pdl.type)
-    %val1 = pdl.result 0 of %op1
-    %op2 = pdl.operation "tf.Mean" (%val0_0, %val1 : !pdl.value, !pdl.value) -> (%mean_loss_type : !pdl.type)
-    %val2 = pdl.result 0 of %op2
-    %op3 = pdl.operation "tf.PreventGradient" (%val0_1 : !pdl.value) -> (%in_type : !pdl.type)
-    %val3 = pdl.result 0 of %op3
-    %op4 = pdl.operation "tf.Const" -> (%mul_const_type : !pdl.type)
-    %val4 = pdl.result 0 of %op4
-    %op5 = pdl.operation "tf.Mul" (%val3, %val4 : !pdl.value, !pdl.value) -> (%in_type : !pdl.type)
+    %opadd = pdl.operation "asm.add" (%val0, %cr : !pdl.value, !pdl.value) -> (%c0_type : !pdl.type)
 
-    pdl.rewrite { // roots: %op2, %op5
-      %op6 = pdl.operation "kernel.SoftmaxCrossEntropy" (%rxact, %rxlabel : !pdl.value, !pdl.value) -> (%mean_loss_type, %in_type : !pdl.type, !pdl.type)
-      %val6_0 = pdl.result 0 of %op6 // txloss
-      %val6_1 = pdl.result 1 of %op6 // txdelta
-      pdl.replace %op5 with (%val6_1 : !pdl.value) // tf.Mul
-      pdl.erase %op4 // tf.Const
-      pdl.erase %op3 // tf.PreventGradient
-      pdl.replace %op2 with (%val6_0 : !pdl.value) // tf.Mean
-      pdl.erase %op1 // tf.Const
-      pdl.erase %op0 // tf.SparseSoftmaxCrossEntropyWithLogits
+    pdl.rewrite %opadd {
+      // %op1 = pdl.operation "kernel.FcFwd" (%rxact, %weight : !pdl.value, !pdl.value) -> (%out_type : !pdl.type)
+      %val1 = pdl.result 1 of %opadd
+      pdl.replace %op0 with (%val1 : !pdl.value)
     }
   }
 }
 
-// CHECK-LABEL: test.mlp_split
-// CHECK: %[[FWD:.*]] = "kernel.FcFwd"(%arg0, %arg2) : (tensor<2x20xf32>, tensor<20x10xf32>) -> tensor<2x10xf32>
-// CHECK: %[[SM:.*]]:2 = "kernel.SoftmaxCrossEntropy"(%[[FWD]], %arg1) : (tensor<2x10xf32>, tensor<2xi32>) -> (tensor<f32>, tensor<2x10xf32>)
-// CHECK: %[[BWD:.*]] = "kernel.FcBwd"(%arg0, %[[SM]]#1, %arg2) : (tensor<2x20xf32>, tensor<2x10xf32>, tensor<20x10xf32>) -> tensor<20x10xf32>
-// CHECK: return %[[SM:.*]]#0, %[[BWD]] : tensor<f32>, tensor<20x10xf32>
 module @ir attributes { test.mlp_split } {
-  func @main(%arg0: tensor<2x20xf32>, %arg1: tensor<2xi32>, %arg2: tensor<20x10xf32>) -> (tensor<f32>, tensor<20x10xf32>) {
-    %0 = "tf.Const"() {value = dense<0> : tensor<1xi32>} : () -> tensor<1xi32>
-    %1 = "tf.Const"() {value = dense<1.000000e-01> : tensor<f32>} : () -> tensor<f32>
-    %2 = "tf.Const"() {value = dense<5.000000e-01> : tensor<2x1xf32>} : () -> tensor<2x1xf32>
-    %3 = "tf.MatMul"(%arg0, %arg2) {transpose_a = false, transpose_b = false} : (tensor<2x20xf32>, tensor<20x10xf32>) -> tensor<2x10xf32>
-    %loss, %backprop = "tf.SparseSoftmaxCrossEntropyWithLogits"(%3, %arg1) : (tensor<2x10xf32>, tensor<2xi32>) -> (tensor<2xf32>, tensor<2x10xf32>)
-    %4 = "tf.Mean"(%loss, %0) {keep_dims = false} : (tensor<2xf32>, tensor<1xi32>) -> tensor<f32>
-    %5 = "tf.PreventGradient"(%backprop) : (tensor<2x10xf32>) -> tensor<2x10xf32>
-    %6 = "tf.Mul"(%5, %2) : (tensor<2x10xf32>, tensor<2x1xf32>) -> tensor<2x10xf32>
-    %7 = "tf.MatMul"(%arg0, %6) {transpose_a = true, transpose_b = false} : (tensor<2x20xf32>, tensor<2x10xf32>) -> tensor<20x10xf32>
-    %8 = "tf.Mul"(%7, %1) : (tensor<20x10xf32>, tensor<f32>) -> tensor<20x10xf32>
-    %9 = "tf.Sub"(%arg2, %8) : (tensor<20x10xf32>, tensor<20x10xf32>) -> tensor<20x10xf32>
-    return %4, %9 : tensor<f32>, tensor<20x10xf32>
+  func @main(%r: i32) -> (i32) {
+    %c0 = "asm.int"() { value = 0 : i32} : () -> (i32)
+    %add = "asm.add"(%c0, %r) : (i32, i32) -> (i32)
+    return %add : i32
   }
 }
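
Given that backtrace, the suspect lines in the new pattern are %val1 = pdl.result 1 of %opadd, which asks for a second result that asm.add (a single-result op) does not have, and pdl.replace %op0 applied to a non-root op. A sketch of what the comment "add (int 0) a -> a" seems to intend, replacing the root with its non-constant operand; this is an assumption about the intended semantics, not part of the commit:

pdl.rewrite %opadd {
  // Assumed fix: replace the matched asm.add with its non-constant operand
  // %cr directly; asm.add only defines result 0, so avoid pdl.result 1.
  pdl.replace %opadd with (%cr : !pdl.value)
}

With that shape, RewriterBase::replaceOp receives exactly one valid value for the one result being replaced, and the now-unused asm.int is left for dead-code cleanup.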
