[mlir] update types in remaining Linalg TransformOps test
All ops now support explicit type specification; update types to use
`!transform.any_op` instead of `!pdl.operation` for consistency.
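
For illustration, the renaming applied throughout these tests follows the pattern below (a minimal sketch; `linalg.matmul` here is a stand-in payload op, not taken from any one test):

transform.sequence failures(propagate) {
^bb1(%arg1: !transform.any_op):
  // Previously, handle types were spelled with the PDL interop type:
  //   %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
  //     : (!pdl.operation) -> !pdl.operation
  // Now the Transform dialect's own generic handle type is used:
  %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1
    : (!transform.any_op) -> !transform.any_op
}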

Depends On D144515

Reviewed By: nicolasvasilache

Differential Revision: https://reviews.llvm.org/D150592
ftynse committed May 16, 2023
1 parent 2fe4d90 commit f52b638
Showing 17 changed files with 319 additions and 319 deletions.
44 changes: 22 additions & 22 deletions mlir/test/Dialect/Linalg/convert-conv2d-to-img2col.mlir
@@ -16,10 +16,10 @@ func.func @conv_non_static(%arg0: tensor<?x?x?x?xf32>, %arg1: tensor<3x3x4x16xf3
}

transform.sequence failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!pdl.operation) -> !pdl.operation
+ ^bb1(%arg1: !transform.any_op):
+ %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
// expected-error@below {{failed to apply}}
- %1:2 = transform.structured.convert_conv2d_to_img2col %0 : (!pdl.operation) -> (!pdl.operation, !pdl.operation)
+ %1:2 = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
}

// -----
@@ -88,11 +88,11 @@ func.func @conv_16433136(%arg0: tensor<1x16x16x4xf32>, %arg1: tensor<3x3x4x16xf3
}

transform.sequence failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!pdl.operation) -> !pdl.operation
- %img2col_tensor_producer, %transformed = transform.structured.convert_conv2d_to_img2col %0 : (!pdl.operation) -> (!pdl.operation, !pdl.operation)
- transform.print %img2col_tensor_producer {name = "tensor_producer"}: !pdl.operation
- transform.print %transformed {name = "transformed"}: !pdl.operation
+ ^bb1(%arg1: !transform.any_op):
+ %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %img2col_tensor_producer, %transformed = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ transform.print %img2col_tensor_producer {name = "tensor_producer"}: !transform.any_op
+ transform.print %transformed {name = "transformed"}: !transform.any_op
}

// -----
@@ -167,9 +167,9 @@ func.func @depthwise_conv_hwc_114x16x3(%input: tensor<1x114x114x16xf32>, %filter
}

transform.sequence failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.depthwise_conv_2d_nhwc_hwc"]} in %arg1 : (!pdl.operation) -> !pdl.operation
- %1:2 = transform.structured.convert_conv2d_to_img2col %0 : (!pdl.operation) -> (!pdl.operation, !pdl.operation)
+ ^bb1(%arg1: !transform.any_op):
+ %0 = transform.structured.match ops{["linalg.depthwise_conv_2d_nhwc_hwc"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1:2 = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
}

// -----
@@ -209,9 +209,9 @@ func.func @batch_nhwc_conv(%arg0: tensor<8x16x16x4xf32>, %arg1: tensor<3x3x4x16x
}

transform.sequence failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!pdl.operation) -> !pdl.operation
- %1:2 = transform.structured.convert_conv2d_to_img2col %0 : (!pdl.operation) -> (!pdl.operation, !pdl.operation)
+ ^bb1(%arg1: !transform.any_op):
+ %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1:2 = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
}

// -----
@@ -272,9 +272,9 @@ func.func @batch_nchw_conv(%arg0: tensor<8x4x16x16xf32>, %arg1: tensor<16x4x3x3x
}

transform.sequence failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.conv_2d_nchw_fchw"]} in %arg1 : (!pdl.operation) -> !pdl.operation
- %1:2 = transform.structured.convert_conv2d_to_img2col %0 : (!pdl.operation) -> (!pdl.operation, !pdl.operation)
+ ^bb1(%arg1: !transform.any_op):
+ %0 = transform.structured.match ops{["linalg.conv_2d_nchw_fchw"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1:2 = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
}

// -----
@@ -308,9 +308,9 @@ func.func @conv_integer_extend(%arg0: tensor<1x16x16x4xi8>, %arg1: tensor<3x3x4x
}

transform.sequence failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!pdl.operation) -> !pdl.operation
- %img2col_tensor_producer, %transformed = transform.structured.convert_conv2d_to_img2col %0 : (!pdl.operation) -> (!pdl.operation, !pdl.operation)
- transform.print %img2col_tensor_producer {name = "tensor_producer"}: !pdl.operation
- transform.print %transformed {name = "transformed"}: !pdl.operation
+ ^bb1(%arg1: !transform.any_op):
+ %0 = transform.structured.match ops{["linalg.conv_2d_nhwc_hwcf"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %img2col_tensor_producer, %transformed = transform.structured.convert_conv2d_to_img2col %0 : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
+ transform.print %img2col_tensor_producer {name = "tensor_producer"}: !transform.any_op
+ transform.print %transformed {name = "transformed"}: !transform.any_op
}
54 changes: 27 additions & 27 deletions mlir/test/Dialect/Linalg/hoisting.mlir
@@ -75,11 +75,11 @@ func.func @hoist_vector_transfer_pairs(
}

transform.sequence failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
+ ^bb1(%arg1: !transform.any_op):
%0 = transform.structured.match ops{["func.func"]} in %arg1
- : (!pdl.operation) -> !pdl.operation
+ : (!transform.any_op) -> !transform.any_op
transform.structured.hoist_redundant_vector_transfers %0
- : (!pdl.operation) -> !pdl.operation
+ : (!transform.any_op) -> !transform.any_op
}

// -----
@@ -164,11 +164,11 @@ func.func @hoist_vector_transfer_pairs_disjoint(
}

transform.sequence failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
+ ^bb1(%arg1: !transform.any_op):
%0 = transform.structured.match ops{["func.func"]} in %arg1
- : (!pdl.operation) -> !pdl.operation
+ : (!transform.any_op) -> !transform.any_op
transform.structured.hoist_redundant_vector_transfers %0
- : (!pdl.operation) -> !pdl.operation
+ : (!transform.any_op) -> !transform.any_op
}

// -----
@@ -209,11 +209,11 @@ func.func @hoist_vector_transfer_pairs_in_affine_loops(%memref0: memref<64x64xi3
}

transform.sequence failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
+ ^bb1(%arg1: !transform.any_op):
%0 = transform.structured.match ops{["func.func"]} in %arg1
- : (!pdl.operation) -> !pdl.operation
+ : (!transform.any_op) -> !transform.any_op
transform.structured.hoist_redundant_vector_transfers %0
- : (!pdl.operation) -> !pdl.operation
+ : (!transform.any_op) -> !transform.any_op
}

// -----
@@ -298,11 +298,11 @@ func.func @hoist_vector_transfer_pairs_tensor(
}

transform.sequence failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
+ ^bb1(%arg1: !transform.any_op):
%0 = transform.structured.match ops{["func.func"]} in %arg1
- : (!pdl.operation) -> !pdl.operation
+ : (!transform.any_op) -> !transform.any_op
transform.structured.hoist_redundant_tensor_subsets %0
- : (!pdl.operation) -> ()
+ : (!transform.any_op) -> ()
}

// -----
@@ -393,11 +393,11 @@ func.func @hoist_vector_transfer_pairs_disjoint_tensor(
}

transform.sequence failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
+ ^bb1(%arg1: !transform.any_op):
%0 = transform.structured.match ops{["func.func"]} in %arg1
- : (!pdl.operation) -> !pdl.operation
+ : (!transform.any_op) -> !transform.any_op
transform.structured.hoist_redundant_tensor_subsets %0
- : (!pdl.operation) -> ()
+ : (!transform.any_op) -> ()
}

// -----
@@ -510,11 +510,11 @@ func.func @hoist_vector_transfer_pairs_tensor_and_slices(
}

transform.sequence failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
+ ^bb1(%arg1: !transform.any_op):
%0 = transform.structured.match ops{["func.func"]} in %arg1
- : (!pdl.operation) -> !pdl.operation
+ : (!transform.any_op) -> !transform.any_op
transform.structured.hoist_redundant_tensor_subsets %0
- : (!pdl.operation) -> ()
+ : (!transform.any_op) -> ()
}

// -----
@@ -557,11 +557,11 @@ func.func @hoist_vector_transfer_write_pairs_disjoint_tensor(
}

transform.sequence failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
+ ^bb1(%arg1: !transform.any_op):
%0 = transform.structured.match ops{["func.func"]} in %arg1
- : (!pdl.operation) -> !pdl.operation
+ : (!transform.any_op) -> !transform.any_op
transform.structured.hoist_redundant_tensor_subsets %0
- : (!pdl.operation) -> ()
+ : (!transform.any_op) -> ()
}

// -----
@@ -670,11 +670,11 @@ func.func @hoist_vector_transfer_pairs_tensor_and_slices_static_large_tensor(
}

transform.sequence failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
+ ^bb1(%arg1: !transform.any_op):
%0 = transform.structured.match ops{["func.func"]} in %arg1
- : (!pdl.operation) -> !pdl.operation
+ : (!transform.any_op) -> !transform.any_op
transform.structured.hoist_redundant_tensor_subsets %0
- : (!pdl.operation) -> ()
+ : (!transform.any_op) -> ()
}

// -----
@@ -716,9 +716,9 @@ func.func @hoist_vector_transfer_read() {
}

transform.sequence failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
+ ^bb1(%arg1: !transform.any_op):
%0 = transform.structured.match ops{["func.func"]} in %arg1
- : (!pdl.operation) -> !pdl.operation
+ : (!transform.any_op) -> !transform.any_op
transform.structured.hoist_redundant_vector_transfers %0
- : (!pdl.operation) -> !pdl.operation
+ : (!transform.any_op) -> !transform.any_op
}
48 changes: 24 additions & 24 deletions mlir/test/Dialect/Linalg/multisize-tiling-full.mlir
@@ -3,18 +3,18 @@

// This implements a 2D multisize tiling with target sizes [3, 10].
transform.sequence failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!pdl.operation) -> !pdl.operation
- %1:3 = transform.structured.multitile_sizes %0 { dimension = 0, target_size = 3} : (!pdl.operation) -> !pdl.operation
- %t:3 = transform.structured.multitile_sizes %0 { dimension = 1, target_size = 10} : (!pdl.operation) -> !pdl.operation
- %2:2 = transform.structured.split %0 after %1#2 { dimension = 0 } : !pdl.operation, !pdl.operation
- %3:2 = transform.structured.tile %2#0 [%1#0] : (!pdl.operation, !pdl.operation) -> (!pdl.operation, !pdl.operation)
- %4:2 = transform.structured.tile %2#1 [%1#1] : (!pdl.operation, !pdl.operation) -> (!pdl.operation, !pdl.operation)
- %5 = merge_handles %3#0, %4#0 : !pdl.operation
- %tt:3 = replicate num(%5) %t#0, %t#1, %t#2 : !pdl.operation, !pdl.operation, !pdl.operation, !pdl.operation
- %6:2 = transform.structured.split %5 after %tt#2 { dimension = 1 } : !pdl.operation, !pdl.operation
- transform.structured.tile %6#0 [0, %tt#0] : (!pdl.operation, !pdl.operation) -> (!pdl.operation, !pdl.operation)
- transform.structured.tile %6#1 [0, %tt#1] : (!pdl.operation, !pdl.operation) -> (!pdl.operation, !pdl.operation)
+ ^bb1(%arg1: !transform.any_op):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1:3 = transform.structured.multitile_sizes %0 { dimension = 0, target_size = 3} : (!transform.any_op) -> !transform.any_op
+ %t:3 = transform.structured.multitile_sizes %0 { dimension = 1, target_size = 10} : (!transform.any_op) -> !transform.any_op
+ %2:2 = transform.structured.split %0 after %1#2 { dimension = 0 } : !transform.any_op, !transform.any_op
+ %3:2 = transform.structured.tile %2#0 [%1#0] : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
+ %4:2 = transform.structured.tile %2#1 [%1#1] : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
+ %5 = merge_handles %3#0, %4#0 : !transform.any_op
+ %tt:3 = replicate num(%5) %t#0, %t#1, %t#2 : !transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op
+ %6:2 = transform.structured.split %5 after %tt#2 { dimension = 1 } : !transform.any_op, !transform.any_op
+ transform.structured.tile %6#0 [0, %tt#0] : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
+ transform.structured.tile %6#1 [0, %tt#1] : (!transform.any_op, !transform.any_op) -> (!transform.any_op, !transform.any_op)
}

func.func private @elem(%arg0: f32, %arg1: index, %arg2: index) -> f32
@@ -103,18 +103,18 @@ func.func @two_d(%arg0: tensor<10x34xf32>,
// -----

transform.sequence failures(propagate) {
- ^bb1(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!pdl.operation) -> !pdl.operation
- %1:3 = transform.structured.multitile_sizes %0 { dimension = 0, target_size = 3} : (!pdl.operation) -> !transform.param<i64>
- %t:3 = transform.structured.multitile_sizes %0 { dimension = 1, target_size = 10} : (!pdl.operation) -> !transform.param<i64>
- %2:2 = transform.structured.split %0 after %1#2 { dimension = 0 } : !pdl.operation, !transform.param<i64>
- %3:2 = transform.structured.tile %2#0 [%1#0] : (!pdl.operation, !transform.param<i64>) -> (!pdl.operation, !pdl.operation)
- %4:2 = transform.structured.tile %2#1 [%1#1] : (!pdl.operation, !transform.param<i64>) -> (!pdl.operation, !pdl.operation)
- %5 = merge_handles %3#0, %4#0 : !pdl.operation
- %tt:3 = replicate num(%5) %t#0, %t#1, %t#2 : !pdl.operation, !transform.param<i64>, !transform.param<i64>, !transform.param<i64>
- %6:2 = transform.structured.split %5 after %tt#2 { dimension = 1 } : !pdl.operation, !transform.param<i64>
- transform.structured.tile %6#0 [0, %tt#0] : (!pdl.operation, !transform.param<i64>) -> (!pdl.operation, !pdl.operation)
- transform.structured.tile %6#1 [0, %tt#1] : (!pdl.operation, !transform.param<i64>) -> (!pdl.operation, !pdl.operation)
+ ^bb1(%arg1: !transform.any_op):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1:3 = transform.structured.multitile_sizes %0 { dimension = 0, target_size = 3} : (!transform.any_op) -> !transform.param<i64>
+ %t:3 = transform.structured.multitile_sizes %0 { dimension = 1, target_size = 10} : (!transform.any_op) -> !transform.param<i64>
+ %2:2 = transform.structured.split %0 after %1#2 { dimension = 0 } : !transform.any_op, !transform.param<i64>
+ %3:2 = transform.structured.tile %2#0 [%1#0] : (!transform.any_op, !transform.param<i64>) -> (!transform.any_op, !transform.any_op)
+ %4:2 = transform.structured.tile %2#1 [%1#1] : (!transform.any_op, !transform.param<i64>) -> (!transform.any_op, !transform.any_op)
+ %5 = merge_handles %3#0, %4#0 : !transform.any_op
+ %tt:3 = replicate num(%5) %t#0, %t#1, %t#2 : !transform.any_op, !transform.param<i64>, !transform.param<i64>, !transform.param<i64>
+ %6:2 = transform.structured.split %5 after %tt#2 { dimension = 1 } : !transform.any_op, !transform.param<i64>
+ transform.structured.tile %6#0 [0, %tt#0] : (!transform.any_op, !transform.param<i64>) -> (!transform.any_op, !transform.any_op)
+ transform.structured.tile %6#1 [0, %tt#1] : (!transform.any_op, !transform.param<i64>) -> (!transform.any_op, !transform.any_op)
}

func.func private @elem(%arg0: f32, %arg1: index, %arg2: index) -> f32
6 changes: 3 additions & 3 deletions mlir/test/Dialect/Linalg/tile-conv.mlir
@@ -10,9 +10,9 @@ func.func @conv(%arg0 : memref<?x?xf32>, %arg1 : memref<?x?xf32>, %arg2 : memref
}

transform.sequence failures(propagate) {
- ^bb0(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.conv_2d"]} in %arg1 : (!pdl.operation) -> !pdl.operation
- %1, %loop:2 = transform.structured.tile %0 [2, 3] : (!pdl.operation) -> (!pdl.operation, !pdl.operation, !pdl.operation)
+ ^bb0(%arg1: !transform.any_op):
+ %0 = transform.structured.match ops{["linalg.conv_2d"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1, %loop:2 = transform.structured.tile %0 [2, 3] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
}

// CHECK: func @conv
12 changes: 6 additions & 6 deletions mlir/test/Dialect/Linalg/tile-indexed.mlir
@@ -12,9 +12,9 @@ func.func @indexed_vector(%arg0: memref<50xindex>) {
}

transform.sequence failures(propagate) {
- ^bb0(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!pdl.operation) -> !pdl.operation
- %1, %loop = transform.structured.tile %0 [10] : (!pdl.operation) -> (!pdl.operation, !pdl.operation)
+ ^bb0(%arg1: !transform.any_op):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1, %loop = transform.structured.tile %0 [10] : (!transform.any_op) -> (!transform.any_op, !transform.any_op)
}

// TILE-10n25-DAG: [[$MAP:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0 + d1)>
@@ -42,9 +42,9 @@ func.func @indexed_matrix(%arg0: memref<50x50xindex>) {
}

transform.sequence failures(propagate) {
- ^bb0(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!pdl.operation) -> !pdl.operation
- %1, %loop:2 = transform.structured.tile %0 [10, 25] : (!pdl.operation) -> (!pdl.operation, !pdl.operation, !pdl.operation)
+ ^bb0(%arg1: !transform.any_op):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1, %loop:2 = transform.structured.tile %0 [10, 25] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op)
}

// TILE-10n25-DAG: [[$MAP:#[a-zA-Z0-9_]*]] = affine_map<(d0, d1) -> (d0 + d1)>
18 changes: 9 additions & 9 deletions mlir/test/Dialect/Linalg/tile-tensors.mlir
@@ -28,9 +28,9 @@ func.func @matmul_tensors(
}

transform.sequence failures(propagate) {
- ^bb0(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!pdl.operation) -> !pdl.operation
- %1, %loops:3 = transform.structured.tile %0 [2, 3, 4] : (!pdl.operation) -> (!pdl.operation, !pdl.operation, !pdl.operation, !pdl.operation)
+ ^bb0(%arg1: !transform.any_op):
+ %0 = transform.structured.match ops{["linalg.matmul"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1, %loops:3 = transform.structured.tile %0 [2, 3, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
}

// -----
@@ -59,9 +59,9 @@ func.func @generic_op_tensors(
}

transform.sequence failures(propagate) {
- ^bb0(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!pdl.operation) -> !pdl.operation
- %1, %loops:3 = transform.structured.tile %0 [2, 3, 4] : (!pdl.operation) -> (!pdl.operation, !pdl.operation, !pdl.operation, !pdl.operation)
+ ^bb0(%arg1: !transform.any_op):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1, %loops:3 = transform.structured.tile %0 [2, 3, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
}

// CHECK-LABEL: func @generic_op_tensors
@@ -130,7 +130,7 @@ func.func @fold_extract_slice(
}

transform.sequence failures(propagate) {
- ^bb0(%arg1: !pdl.operation):
- %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!pdl.operation) -> !pdl.operation
- %1, %loops:3 = transform.structured.tile %0 [2, 3, 4] : (!pdl.operation) -> (!pdl.operation, !pdl.operation, !pdl.operation, !pdl.operation)
+ ^bb0(%arg1: !transform.any_op):
+ %0 = transform.structured.match ops{["linalg.generic"]} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1, %loops:3 = transform.structured.tile %0 [2, 3, 4] : (!transform.any_op) -> (!transform.any_op, !transform.any_op, !transform.any_op, !transform.any_op)
}
