[mlir][Linalg] Make printer/parser have the same behavior.
The parser of the generic op did not recognize the output of mlir-opt when there
are multiple outputs: the printer wraps the result types in parentheses, while
the parser did not accept that form. This patch makes the two behave the same.
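For illustration, this is the multiple-output form that mlir-opt prints and that
the parser now also accepts (adapted from the bufferize test below; the indexing
maps and operands outside the visible diff hunk are reconstructed, so treat them
as a sketch):

    func @multiple_results(%arg0: tensor<4xf32>) -> (tensor<4xf32>, tensor<4xf32>) {
      %0, %1 = linalg.generic {
          indexing_maps = [affine_map<(d0) -> (d0)>,
                           affine_map<(d0) -> (d0)>,
                           affine_map<(d0) -> (d0)>],
          iterator_types = ["parallel"]}
          ins(%arg0 : tensor<4xf32>)
          outs(%arg0, %arg0 : tensor<4xf32>, tensor<4xf32>) {
        ^bb0(%gen_arg1: f32, %out1: f32, %out2: f32):
          %tmp1 = math.exp %gen_arg1 : f32
          linalg.yield %tmp1, %tmp1 : f32, f32
      } -> (tensor<4xf32>, tensor<4xf32>)
      return %0, %1 : tensor<4xf32>, tensor<4xf32>
    }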

Reviewed By: mravishankar

Differential Revision: https://reviews.llvm.org/D104256
hanhanW committed Jun 14, 2021
1 parent e0c382a commit e3bc4db
Showing 6 changed files with 43 additions and 11 deletions.
5 changes: 2 additions & 3 deletions mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -3067,9 +3067,8 @@ parseNamedStructuredOpRegion(OpAsmParser &parser, Region &region,
 static ParseResult
 parseNamedStructuredOpResults(OpAsmParser &parser,
                               SmallVectorImpl<Type> &resultTypes) {
-  if (succeeded(parser.parseOptionalArrow()))
-    if (parser.parseTypeList(resultTypes))
-      return failure();
+  if (parser.parseOptionalArrowTypeList(resultTypes))
+    return failure();
   return success();
 }
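
With parseOptionalArrowTypeList, the result clause now follows MLIR's standard
arrow-type-list grammar, which matches what the printer emits. Schematically
(fragments only; the single-result form is inferred from the unchanged
single-output tests rather than from this diff):

    } -> tensor<4xf32>                     // one result: bare type
    } -> (tensor<4xf32>, tensor<4xf32>)    // multiple results: parenthesized list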

4 changes: 2 additions & 2 deletions mlir/test/Dialect/Linalg/bufferize.mlir
@@ -85,7 +85,7 @@ func @multiple_results(%arg0: tensor<4xf32>) -> (tensor<4xf32>, tensor<4xf32>) {
   ^bb0(%gen_arg1: f32, %out1: f32, %out2: f32):
     %tmp1 = math.exp %gen_arg1 : f32
     linalg.yield %tmp1, %tmp1 : f32, f32
-  } -> tensor<4xf32>, tensor<4xf32>
+  } -> (tensor<4xf32>, tensor<4xf32>)
   return %0, %1 : tensor<4xf32>, tensor<4xf32>
 }

@@ -118,7 +118,7 @@ func @dynamic_results(%arg0: tensor<?x?xf32>)
   ^bb0(%gen_arg1: f32, %out1: f32, %out2: f32):
     %tmp1 = math.exp %gen_arg1 : f32
     linalg.yield %tmp1, %tmp1 : f32, f32
-  } -> tensor<?x?xf32>, tensor<?x?xf32>
+  } -> (tensor<?x?xf32>, tensor<?x?xf32>)
   return %0, %1 : tensor<?x?xf32>, tensor<?x?xf32>
 }

6 changes: 3 additions & 3 deletions mlir/test/Dialect/Linalg/canonicalize.mlir
@@ -714,7 +714,7 @@ func @init_tensor_dim_of_linalg_result(%arg_0 : tensor<?xf32>,
     outs(%arg_0, %arg_1 : tensor<?xf32>, tensor<?xf32>) {
   ^bb0(%in: f32, %out_0: f32, %out_1: f32):
     linalg.yield %in, %in : f32, f32
-  } -> tensor<?xf32>, tensor<?xf32>
+  } -> (tensor<?xf32>, tensor<?xf32>)
 
   %c0 = constant 0 : index
   %num_elem_0 = memref.dim %0, %c0 : tensor<?xf32>
@@ -778,7 +778,7 @@ func @remove_no_op(%arg0 : tensor<?x?x?xf32>, %arg1 : tensor<?x?x?xf32>)
     outs(%3, %3 : tensor<?x?x?xf32>, tensor<?x?x?xf32>) {
   ^bb0(%arg2 : f32, %arg3 : f32, %arg4 : f32, %arg5 : f32):
     linalg.yield %arg3, %arg2 : f32, f32
-  } -> tensor<?x?x?xf32>, tensor<?x?x?xf32>
+  } -> (tensor<?x?x?xf32>, tensor<?x?x?xf32>)
   return %4, %5 : tensor<?x?x?xf32>, tensor<?x?x?xf32>
 }
 // CHECK-LABEL: func @remove_no_op
@@ -832,7 +832,7 @@ func @keep_not_noop(%arg0 : tensor<?x?xf32>, %arg1 : tensor<?x?xf32>)
     outs(%2, %2 : tensor<?x?xf32>, tensor<?x?xf32>) {
   ^bb0(%arg3: f32, %arg4 : f32, %arg5 : f32, %arg6 : f32):
     linalg.yield %arg2, %arg4 : f32, f32
-  } -> tensor<?x?xf32>, tensor<?x?xf32>
+  } -> (tensor<?x?xf32>, tensor<?x?xf32>)
   return %3#0, %3#1 : tensor<?x?xf32>, tensor<?x?xf32>
 }
 // CHECK-LABEL: func @keep_not_noop
2 changes: 1 addition & 1 deletion mlir/test/Dialect/Linalg/invalid.mlir
@@ -449,7 +449,7 @@ func @named_ops(%a3: memref<?x?x?xf32>, %b3: memref<?x?xf32>, %c3: memref<?x?x?x
 func @incorrect_region_arg_count(%m: memref<?x?xf32>) {
   // expected-error @+3 {{region expects 3 args, got 2}}
   %res = linalg.matmul ins(%m, %m : memref<?x?xf32>, memref<?x?xf32>)
-                       -> tensor<?x?xf32>, tensor<?x?xf32>
+                       -> (tensor<?x?xf32>, tensor<?x?xf32>)
   return
 }

33 changes: 33 additions & 0 deletions mlir/test/Dialect/Linalg/roundtrip.mlir
@@ -424,6 +424,39 @@ func @generic_with_tensor_input_and_output(

 // -----
 
+func @generic_with_multiple_tensor_outputs(
+    %arg0: tensor<?xi32>, %arg1: tensor<?xi32>, %arg2: i32)
+    -> (tensor<i32>, tensor<i32>) {
+  %c0 = constant 0 : index
+  %0 = linalg.init_tensor [] : tensor<i32>
+  %1 = linalg.fill(%0, %arg2) : tensor<i32>, i32 -> tensor<i32>
+  %2 = linalg.init_tensor [] : tensor<i32>
+  %3 = linalg.fill(%2, %arg2) : tensor<i32>, i32 -> tensor<i32>
+  %4:2 = linalg.generic {
+    indexing_maps = [affine_map<(d0) -> (d0)>, affine_map<(d0) -> (d0)>, affine_map<(d0) -> ()>, affine_map<(d0) -> ()>],
+    iterator_types = ["reduction"]}
+    ins(%arg0, %arg1 : tensor<?xi32>, tensor<?xi32>)
+    outs(%1, %3 : tensor<i32>, tensor<i32>) {
+  ^bb0(%arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32):  // no predecessors
+    %5 = cmpi sge, %arg3, %arg5 : i32
+    %6 = select %5, %arg3, %arg5 : i32
+    %7 = cmpi eq, %arg3, %arg5 : i32
+    %8 = cmpi slt, %arg4, %arg6 : i32
+    %9 = select %8, %arg4, %arg6 : i32
+    %10 = select %5, %arg4, %arg6 : i32
+    %11 = select %7, %9, %10 : i32
+    linalg.yield %6, %11 : i32, i32
+  } -> (tensor<i32>, tensor<i32>)
+  return %4#0, %4#1 : tensor<i32>, tensor<i32>
+}
+// CHECK-LABEL: func @generic_with_multiple_tensor_outputs
+// CHECK: %{{.*}} = linalg.generic {
+// CHECK-SAME: ins({{.*}} : tensor<?xi32>, tensor<?xi32>)
+// CHECK-SAME: outs({{.*}} : tensor<i32>, tensor<i32>)
+// CHECK: } -> (tensor<i32>, tensor<i32>)
+
+// -----
+
 #accesses_2 = [
   affine_map<(i, j, k) -> (j, i)>,
   affine_map<(i, j, k) -> (i, k, i + j)>,
4 changes: 2 additions & 2 deletions mlir/test/Dialect/Linalg/vectorization.mlir
@@ -386,9 +386,9 @@ func @generic_vectorize_tensor(%arg0: tensor<4x256xf32>,
 // CHECK: %[[R9:.*]] = vector.transfer_write %[[TAN]], %[[ARG0]][%[[C0]], %[[C0]]] {{.*}} : vector<4x256xf32>, tensor<4x256xf32>
     linalg.yield %6, %8, %c1_f32, %9, %10, %11, %12, %13, %14, %15 : f32, f32,
       f32, f32, f32, f32, f32, f32, f32, f32
-  } -> tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>,
+  } -> (tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>,
     tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>,
-    tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>
+    tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>)
 // CHECK: return %[[R0]], %[[R1]], %[[R2]], %[[R3]], %[[R4]], %[[R5]], %[[R6]], %[[R7]], %[[R8]], %[[R9]] : tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>
 return %r#0, %r#1, %r#2, %r#3, %r#4, %r#5, %r#6, %r#7, %r#8, %r#9:
   tensor<4x256xf32>, tensor<4x256xf32>, tensor<4x256xf32>,
