[mlir] drop unnecessary transform.with_pdl_patterns from tests, NFC
Many tests wrap the transform-dialect portion of their IR in
`transform.with_pdl_patterns` without actually using any PDL patterns
inside. Some of these wrappers are leftovers from the migration to
`structured.match`; others are cargo cult. Both kinds are useless and
pollute the tests.
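
For illustration, a distilled before/after sketch of the pattern being
removed (condensed from the diffs below; the matched op is representative):

// Before: an empty PDL wrapper around the actual transform script.
transform.with_pdl_patterns {
^bb0(%arg0: !pdl.operation):
  sequence %arg0 : !pdl.operation failures(propagate) {
  ^bb0(%arg1: !pdl.operation):
    %0 = transform.structured.match ops{["func.func"]} in %arg1
  }
}

// After: the nested sequence is hoisted to be the top-level op and
// receives the root handle directly.
transform.sequence failures(propagate) {
^bb0(%arg1: !pdl.operation):
  %0 = transform.structured.match ops{["func.func"]} in %arg1
}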

Reviewed By: guraypp

Differential Revision: https://reviews.llvm.org/D135661
ftynse committed Oct 11, 2022
1 parent 42b7079 commit 2e9abc0
Showing 26 changed files with 605 additions and 974 deletions.
50 changes: 19 additions & 31 deletions mlir/test/Dialect/Bufferization/Transforms/transform-ops.mlir
@@ -2,14 +2,11 @@

// Test One-Shot Bufferize.

-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
-  sequence %arg0 : !pdl.operation failures(propagate) {
-  ^bb0(%arg1: !pdl.operation):
-    %0 = transform.structured.match ops{["func.func"]} in %arg1
-    transform.bufferization.one_shot_bufferize %0
-        {target_is_module = false}
-  }
+transform.sequence failures(propagate) {
+^bb0(%arg1: !pdl.operation):
+  %0 = transform.structured.match ops{["func.func"]} in %arg1
+  transform.bufferization.one_shot_bufferize %0
+      {target_is_module = false}
}

// CHECK-LABEL: func @test_function(
@@ -34,14 +34,11 @@ func.func @test_function(%A : tensor<?xf32>, %v : vector<4xf32>) -> (tensor<?xf3

// Test analysis of One-Shot Bufferize only.

-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
-  sequence %arg0 : !pdl.operation failures(propagate) {
-  ^bb0(%arg1: !pdl.operation):
-    %0 = transform.structured.match ops{["func.func"]} in %arg1
-    transform.bufferization.one_shot_bufferize %0
-        {target_is_module = false, test_analysis_only = true}
-  }
+transform.sequence failures(propagate) {
+^bb0(%arg1: !pdl.operation):
+  %0 = transform.structured.match ops{["func.func"]} in %arg1
+  transform.bufferization.one_shot_bufferize %0
+      {target_is_module = false, test_analysis_only = true}
}

// CHECK-LABEL: func @test_function_analysis(
@@ -60,14 +54,11 @@ func.func @test_function_analysis(%A : tensor<?xf32>, %v : vector<4xf32>) -> (te
// Test One-Shot Bufferize transform failure with an unknown op. This would be
// allowed with `allow_unknown_ops`.

-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
-  sequence %arg0 : !pdl.operation failures(propagate) {
-  ^bb0(%arg1: !pdl.operation):
-    %0 = transform.structured.match ops{["func.func"]} in %arg1
-    // expected-error @+1 {{bufferization failed}}
-    transform.bufferization.one_shot_bufferize %0 {target_is_module = false}
-  }
+transform.sequence failures(propagate) {
+^bb0(%arg1: !pdl.operation):
+  %0 = transform.structured.match ops{["func.func"]} in %arg1
+  // expected-error @+1 {{bufferization failed}}
+  transform.bufferization.one_shot_bufferize %0 {target_is_module = false}
}
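
For contrast with the strict-mode failure checked above, a hedged sketch of
the permissive variant the test comment alludes to (assuming the
`allow_unknown_ops` unit attribute is accepted alongside the existing ones):

transform.sequence failures(propagate) {
^bb0(%arg1: !pdl.operation):
  %0 = transform.structured.match ops{["func.func"]} in %arg1
  // Hypothetical variant: with allow_unknown_ops set, One-Shot Bufferize is
  // expected to tolerate the unknown op rather than report a failure.
  transform.bufferization.one_shot_bufferize %0
      {target_is_module = false, allow_unknown_ops = true}
}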

func.func @test_unknown_op_failure() -> (tensor<?xf32>) {
@@ -80,13 +71,10 @@ func.func @test_unknown_op_failure() -> (tensor<?xf32>) {

// Test One-Shot Bufferize transform failure with a module op.

-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
-  sequence %arg0 : !pdl.operation failures(propagate) {
-  ^bb0(%arg1: !pdl.operation):
-    // %arg1 is the module
-    transform.bufferization.one_shot_bufferize %arg1
-  }
+transform.sequence failures(propagate) {
+^bb0(%arg1: !pdl.operation):
+  // %arg1 is the module
+  transform.bufferization.one_shot_bufferize %arg1
}

module {
72 changes: 29 additions & 43 deletions mlir/test/Dialect/GPU/transform-gpu-failing.mlir
@@ -44,15 +44,12 @@ func.func @map_nested_foreach_to_threads_excessive_threads(%x: memref<2 x 32 x f

return %y : memref<2 x 32 x f32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
-  transform.sequence %arg0 : !pdl.operation failures(propagate) {
-  ^bb1(%arg1: !pdl.operation):
-    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
-    // expected-error @below {{Trying to launch a GPU kernel with gridDim = (1, 1, 1) blockDim = (1200, 9, 1). It is larger than the limits.}}
-    // expected-note @below {{"blockDim" is very large}}
-    transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [1200, 9, 1] }
-  }
+transform.sequence failures(propagate) {
+^bb1(%arg0: !pdl.operation):
+  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
+  // expected-error @below {{Trying to launch a GPU kernel with gridDim = (1, 1, 1) blockDim = (1200, 9, 1). It is larger than the limits.}}
+  // expected-note @below {{"blockDim" is very large}}
+  transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [1200, 9, 1] }
}
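
(For context on the expected error above: the requested block shape would hold
1200 * 9 * 1 = 10800 threads, far beyond the 1024-threads-per-block limit
typical of current GPUs, so the mapping is rejected before any kernel is
built.)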

// -----
@@ -88,14 +85,12 @@ func.func @map_nested_foreach_to_threads_fewer_threads(%x: memref<2 x 32 x f32>,

return %y : memref<2 x 32 x f32>
}
-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
-  transform.sequence %arg0 : !pdl.operation failures(propagate) {
-  ^bb1(%arg1: !pdl.operation):
-    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
-    // expected-error @below {{The requested GPU threads are fewer than the number of loop trip counts. Try to tile scf.foreach_thread before mapping or set small blockDim.}}
-    transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [128, 4, 1] }
-  }
-}

+transform.sequence failures(propagate) {
+^bb1(%arg0: !pdl.operation):
+  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
+  // expected-error @below {{The requested GPU threads are fewer than the number of loop trip counts. Try to tile scf.foreach_thread before mapping or set small blockDim.}}
+  transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [128, 4, 1] }
+}

// -----
@@ -117,14 +112,11 @@ func.func @map_nested_foreach_to_threads_dynamic_trip_count(%x: memref<2 x 32 x
return %y : memref<2 x 32 x f32>
}

-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
-  transform.sequence %arg0 : !pdl.operation failures(propagate) {
-  ^bb1(%arg1: !pdl.operation):
-    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
-    // expected-error @below {{unsupported dynamic blockdim size}}
-    transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [128, 4, 1] }
-  }
+transform.sequence failures(propagate) {
+^bb1(%arg0: !pdl.operation):
+  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
+  // expected-error @below {{unsupported dynamic blockdim size}}
+  transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [128, 4, 1] }
}

// -----
@@ -145,14 +137,11 @@ func.func @map_nested_foreach_to_threads_4d_loop(%x: memref<2x32x32x32xf32>, %y:
return %y : memref<2x32x32x32xf32>
}

-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
-  transform.sequence %arg0 : !pdl.operation failures(propagate) {
-  ^bb1(%arg1: !pdl.operation):
-    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
-    // expected-error @below {{scf.foreach_thread with rank > 3 does not lower to gpu.thread_id}}
-    transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [128, 4, 1] }
-  }
+transform.sequence failures(propagate) {
+^bb1(%arg0: !pdl.operation):
+  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
+  // expected-error @below {{scf.foreach_thread with rank > 3 does not lower to gpu.thread_id}}
+  transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [128, 4, 1] }
}

// -----
@@ -168,16 +157,13 @@ func.func @map_nested_foreach_to_threads_not_buffer(%x: tensor<32x32xf32>, %y: t
return
}

-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
-  transform.sequence %arg0 : !pdl.operation failures(propagate) {
-  ^bb1(%arg1: !pdl.operation):
-    %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg0
-    %foreach, %tiled = transform.structured.tile_to_foreach_thread_op %matmul num_threads [10, 20, 30]
-    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
-    // expected-error @below {{only bufferized scf.foreach_thread lowers to gpu.thread_id}}
-    transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [128, 4, 1] }
-  }
+transform.sequence failures(propagate) {
+^bb1(%arg0: !pdl.operation):
+  %matmul = transform.structured.match ops{["linalg.matmul"]} in %arg0
+  %foreach, %tiled = transform.structured.tile_to_foreach_thread_op %matmul num_threads [10, 20, 30]
+  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
+  // expected-error @below {{only bufferized scf.foreach_thread lowers to gpu.thread_id}}
+  transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [128, 4, 1] }
}

// -----
46 changes: 17 additions & 29 deletions mlir/test/Dialect/GPU/transform-gpu.mlir
@@ -30,13 +30,10 @@ func.func @saxpy2dblock(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %stream
return %y : !type
}

-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
-  transform.sequence %arg0 : !pdl.operation failures(propagate) {
-  ^bb1(%arg1: !pdl.operation):
-    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
-    transform.gpu.map_foreach_to_blocks %funcop { blockDim = [12, 9, 1]}
-  }
+transform.sequence failures(propagate) {
+^bb1(%arg0: !pdl.operation):
+  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
+  transform.gpu.map_foreach_to_blocks %funcop { blockDim = [12, 9, 1]}
}

// -----
@@ -87,13 +84,10 @@ func.func @saxpy2d(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %stream : !g
return %y : !type
}

-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
-  transform.sequence %arg0 : !pdl.operation failures(propagate) {
-  ^bb1(%arg1: !pdl.operation):
-    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
-    transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [12, 9, 1] }
-  }
+transform.sequence failures(propagate) {
+^bb1(%arg0: !pdl.operation):
+  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
+  transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [12, 9, 1] }
}

// -----
@@ -129,14 +123,11 @@ func.func @saxpy4d(%x: !type4d, %y: !type4d, %alpha : f32) -> !type4d {
return %y : !type4d
}

-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
-  transform.sequence %arg0 : !pdl.operation failures(propagate) {
-  ^bb1(%arg1: !pdl.operation):
-    %funcop = transform.structured.match ops{["func.func"]} in %arg0
-    %gpuLaunch = transform.gpu.map_foreach_to_blocks %funcop { generate_gpu_launch }
-    transform.gpu.map_nested_foreach_to_threads %gpuLaunch { blockDim = [32, 4, 1] }
-  }
+transform.sequence failures(propagate) {
+^bb1(%arg0: !pdl.operation):
+  %funcop = transform.structured.match ops{["func.func"]} in %arg0
+  %gpuLaunch = transform.gpu.map_foreach_to_blocks %funcop { generate_gpu_launch }
+  transform.gpu.map_nested_foreach_to_threads %gpuLaunch { blockDim = [32, 4, 1] }
}

// -----
@@ -166,11 +157,8 @@ func.func @saxpy2d_no_barrier(%x: !type, %y: !type, %t: !type1d, %alpha : f32, %
return %y : !type
}

-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
-  transform.sequence %arg0 : !pdl.operation failures(propagate) {
-  ^bb1(%arg1: !pdl.operation):
-    %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
-    transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [12, 9, 1], syncAfterDistribute = false }
-  }
+transform.sequence failures(propagate) {
+^bb1(%arg0: !pdl.operation):
+  %funcop = transform.structured.match ops{["gpu.launch"]} in %arg0
+  transform.gpu.map_nested_foreach_to_threads %funcop { blockDim = [12, 9, 1], syncAfterDistribute = false }
}
31 changes: 14 additions & 17 deletions mlir/test/Dialect/Linalg/multisize-tiling-full.mlir
@@ -1,22 +1,19 @@
// RUN: mlir-opt --test-transform-dialect-interpreter --canonicalize %s | FileCheck %s

-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
-  // This implements a 2D multisize tiling with target sizes [3, 10].
-  transform.sequence %arg0 : !pdl.operation failures(propagate) {
-  ^bb1(%arg1: !pdl.operation):
-    %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
-    %1:3 = transform.structured.multitile_sizes %0 { dimension = 0, target_size = 3}
-    %t:3 = transform.structured.multitile_sizes %0 { dimension = 1, target_size = 10}
-    %2:2 = transform.structured.split %0 after %1#2 { dimension = 0 }
-    %3:2 = transform.structured.tile %2#0 [%1#0]
-    %4:2 = transform.structured.tile %2#1 [%1#1]
-    %5 = merge_handles %3#0, %4#0 : !pdl.operation
-    %tt:3 = replicate num(%5) %t#0, %t#1, %t#2 : !pdl.operation, !pdl.operation, !pdl.operation, !pdl.operation
-    %6:2 = transform.structured.split %5 after %tt#2 { dimension = 1 }
-    transform.structured.tile %6#0 [0, %tt#0]
-    transform.structured.tile %6#1 [0, %tt#1]
-  }
-}
+// This implements a 2D multisize tiling with target sizes [3, 10].
+transform.sequence failures(propagate) {
+^bb1(%arg1: !pdl.operation):
+  %0 = transform.structured.match ops{["linalg.generic"]} in %arg1
+  %1:3 = transform.structured.multitile_sizes %0 { dimension = 0, target_size = 3}
+  %t:3 = transform.structured.multitile_sizes %0 { dimension = 1, target_size = 10}
+  %2:2 = transform.structured.split %0 after %1#2 { dimension = 0 }
+  %3:2 = transform.structured.tile %2#0 [%1#0]
+  %4:2 = transform.structured.tile %2#1 [%1#1]
+  %5 = merge_handles %3#0, %4#0 : !pdl.operation
+  %tt:3 = replicate num(%5) %t#0, %t#1, %t#2 : !pdl.operation, !pdl.operation, !pdl.operation, !pdl.operation
+  %6:2 = transform.structured.split %5 after %tt#2 { dimension = 1 }
+  transform.structured.tile %6#0 [0, %tt#0]
+  transform.structured.tile %6#1 [0, %tt#1]
+}

func.func private @elem(%arg0: f32, %arg1: index, %arg2: index) -> f32
Expand Down
34 changes: 12 additions & 22 deletions mlir/test/Dialect/Linalg/promote.mlir
@@ -66,13 +66,10 @@ func.func @matmul_f32(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
// CHECK-NOT: memref.dealloc %[[tmpB]] : memref<48xi8>
// CHECK-NOT: memref.dealloc %[[tmpC]] : memref<24xi8>

-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
-  sequence %arg0 : !pdl.operation failures(propagate) {
-  ^bb0(%arg1: !pdl.operation):
-    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
-    %1 = transform.structured.promote %0 { use_alloca }
-  }
+transform.sequence failures(propagate) {
+^bb0(%arg1: !pdl.operation):
+  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+  %1 = transform.structured.promote %0 { use_alloca }
}

// -----
@@ -139,16 +136,12 @@ func.func @matmul_f64(%A: memref<?xi8>, %M: index, %N: index, %K: index) {
// CHECK: memref.dealloc %[[tmpB_f64]] : memref<96xi8>
// CHECK: memref.dealloc %[[tmpC_f64]] : memref<48xi8>

-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
-  sequence %arg0 : !pdl.operation failures(propagate) {
-  ^bb0(%arg1: !pdl.operation):
-    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
-    %1 = transform.structured.promote %0
-  }
+transform.sequence failures(propagate) {
+^bb0(%arg1: !pdl.operation):
+  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+  %1 = transform.structured.promote %0
}


// -----

#map6 = affine_map<(d0, d1, d2) -> (d0, d2)>
@@ -189,11 +182,8 @@ func.func @promote_rank_reducing_subviews(%arg0: memref<?x?x?x64xf32, strided<[
return
}

-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
-  sequence %arg0 : !pdl.operation failures(propagate) {
-  ^bb0(%arg1: !pdl.operation):
-    %0 = transform.structured.match interface{LinalgOp} in %arg1
-    %1 = transform.structured.promote %0
-  }
+transform.sequence failures(propagate) {
+^bb0(%arg1: !pdl.operation):
+  %0 = transform.structured.match interface{LinalgOp} in %arg1
+  %1 = transform.structured.promote %0
}
13 changes: 5 additions & 8 deletions mlir/test/Dialect/Linalg/promotion_options.mlir
@@ -31,12 +31,9 @@ func.func @gemm(%a : memref<?x?xf32>, %b : memref<?x?xf32>, %c : memref<?x?xf32>
// CHECK: memref.dealloc %[[A0]]
// CHECK: memref.dealloc %[[A1]]

-transform.with_pdl_patterns {
-^bb0(%arg0: !pdl.operation):
-  sequence %arg0 : !pdl.operation failures(propagate) {
-  ^bb0(%arg1: !pdl.operation):
-    %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
-    %1, %loops:3 = transform.structured.tile %0 [16, 16, 16]
-    %2 = transform.structured.promote %1 { operands_to_promote = [0, 2], force_full_tiles = [false, false] }
-  }
+transform.sequence failures(propagate) {
+^bb0(%arg1: !pdl.operation):
+  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
+  %1, %loops:3 = transform.structured.tile %0 [16, 16, 16]
+  %2 = transform.structured.promote %1 { operands_to_promote = [0, 2], force_full_tiles = [false, false] }
}
